Diffstat (limited to 'drivers')
38 files changed, 6371 insertions, 41 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index 622fa266b29e..1a693d3f9d51 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -148,6 +148,8 @@ source "drivers/remoteproc/Kconfig" source "drivers/rpmsg/Kconfig" +source "drivers/soc/Kconfig" + source "drivers/devfreq/Kconfig" source "drivers/extcon/Kconfig" diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index a60f26400705..aaa0f2a87118 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -57,6 +57,7 @@ #define CCN_DT_PMCCNTRSR 0x0190 #define CCN_DT_PMOVSR 0x0198 #define CCN_DT_PMOVSR_CLR 0x01a0 +#define CCN_DT_PMOVSR_CLR__MASK 0x1f #define CCN_DT_PMCR 0x01a8 #define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6) #define CCN_DT_PMCR__PMU_EN (1 << 0) @@ -1051,7 +1052,8 @@ static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) struct perf_event *event = dt->pmu_counters[idx].event; int overflowed = pmovsr & BIT(idx); - WARN_ON_ONCE(overflowed && !event); + WARN_ON_ONCE(overflowed && !event && + idx != CCN_IDX_PMU_CYCLE_COUNTER); if (!event || !overflowed) continue; @@ -1087,6 +1089,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Initialize DT subsystem */ ccn->dt.base = ccn->base + CCN_REGION_SIZE; spin_lock_init(&ccn->dt.config_lock); + writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR); writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN, ccn->dt.base + CCN_DT_PMCR); diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c index 8c96307d7363..a76d03fd577b 100644 --- a/drivers/clk/at91/clk-system.c +++ b/drivers/clk/at91/clk-system.c @@ -119,13 +119,7 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name, init.ops = &system_ops; init.parent_names = &parent_name; init.num_parents = 1; - /* - * CLK_IGNORE_UNUSED is used to avoid ddrck switch off. - * TODO : we should implement a driver supporting at91 ddr controller - * (see drivers/memory) which would request and enable the ddrck clock. - * When this is done we will be able to remove CLK_IGNORE_UNUSED flag. - */ - init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED; + init.flags = CLK_SET_RATE_PARENT; sys->id = id; sys->hw.init = &init; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index cfd6519df661..82a2ebe41e27 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -120,6 +120,10 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK help Use ARM global timer clock source as sched_clock +config ATMEL_PIT + select CLKSRC_OF if OF + def_bool SOC_AT91SAM9 || SOC_SAMA5 + config CLKSRC_METAG_GENERIC def_bool y if METAG help diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 7fd9fd1dff42..e566f6c7ded4 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o +obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c new file mode 100644 index 000000000000..d5289098b3df --- /dev/null +++ b/drivers/clocksource/timer-atmel-pit.c @@ -0,0 +1,296 @@ +/* + * at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x + * + * Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France + * Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France + * Converted to ClockSource/ClockEvents by David Brownell. 
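+ * + * The PIT counts MCK/16 ticks up to a 20-bit programmable interval value + * (PIV) and keeps a 12-bit count of elapsed intervals (PICNT); this driver + * uses it both as a clocksource and as the periodic clockevent for CPU0.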
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "AT91: PIT: " fmt + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> + +#define AT91_PIT_MR 0x00 /* Mode Register */ +#define AT91_PIT_PITIEN BIT(25) /* Timer Interrupt Enable */ +#define AT91_PIT_PITEN BIT(24) /* Timer Enabled */ +#define AT91_PIT_PIV GENMASK(19, 0) /* Periodic Interval Value */ + +#define AT91_PIT_SR 0x04 /* Status Register */ +#define AT91_PIT_PITS BIT(0) /* Timer Status */ + +#define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */ +#define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */ +#define AT91_PIT_PICNT GENMASK(31, 20) /* Interval Counter */ +#define AT91_PIT_CPIV GENMASK(19, 0) /* Interval Value */ + +#define PIT_CPIV(x) ((x) & AT91_PIT_CPIV) +#define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20) + +struct pit_data { + struct clock_event_device clkevt; + struct clocksource clksrc; + + void __iomem *base; + u32 cycle; + u32 cnt; + unsigned int irq; + struct clk *mck; +}; + +static inline struct pit_data *clksrc_to_pit_data(struct clocksource *clksrc) +{ + return container_of(clksrc, struct pit_data, clksrc); +} + +static inline struct pit_data *clkevt_to_pit_data(struct clock_event_device *clkevt) +{ + return container_of(clkevt, struct pit_data, clkevt); +} + +static inline unsigned int pit_read(void __iomem *base, unsigned int reg_offset) +{ + return __raw_readl(base + reg_offset); +} + +static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsigned long value) +{ + __raw_writel(value, base + reg_offset); +} + +/* + * Clocksource: just a monotonic counter of MCK/16 cycles. + * We don't care whether or not PIT irqs are enabled.
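+ * The elapsed count is rebuilt from three pieces: data->cnt (whole periods + * accumulated by the clockevent code), plus PICNT periods elapsed since that + * accumulation, plus the in-progress CPIV count, sampled with irqs disabled + * so the three reads are consistent.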
+ */ +static cycle_t read_pit_clk(struct clocksource *cs) +{ + struct pit_data *data = clksrc_to_pit_data(cs); + unsigned long flags; + u32 elapsed; + u32 t; + + raw_local_irq_save(flags); + elapsed = data->cnt; + t = pit_read(data->base, AT91_PIT_PIIR); + raw_local_irq_restore(flags); + + elapsed += PIT_PICNT(t) * data->cycle; + elapsed += PIT_CPIV(t); + return elapsed; +} + +/* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ +static void +pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) +{ + struct pit_data *data = clkevt_to_pit_data(dev); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + /* update clocksource counter */ + data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); + pit_write(data->base, AT91_PIT_MR, + (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN); + break; + case CLOCK_EVT_MODE_ONESHOT: + BUG(); + /* FALLTHROUGH */ + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + /* disable irq, leaving the clocksource active */ + pit_write(data->base, AT91_PIT_MR, + (data->cycle - 1) | AT91_PIT_PITEN); + break; + case CLOCK_EVT_MODE_RESUME: + break; + } +} + +static void at91sam926x_pit_suspend(struct clock_event_device *cedev) +{ + struct pit_data *data = clkevt_to_pit_data(cedev); + + /* Disable timer */ + pit_write(data->base, AT91_PIT_MR, 0); +} + +static void at91sam926x_pit_reset(struct pit_data *data) +{ + /* Disable timer and irqs */ + pit_write(data->base, AT91_PIT_MR, 0); + + /* Clear any pending interrupts, wait for PIT to stop counting */ + while (PIT_CPIV(pit_read(data->base, AT91_PIT_PIVR)) != 0) + cpu_relax(); + + /* Start PIT but don't enable IRQ */ + pit_write(data->base, AT91_PIT_MR, + (data->cycle - 1) | AT91_PIT_PITEN); +} + +static void at91sam926x_pit_resume(struct clock_event_device *cedev) +{ + struct pit_data *data = clkevt_to_pit_data(cedev); + + at91sam926x_pit_reset(data); +} + +/* + * IRQ handler for the timer. + */ +static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) +{ + struct pit_data *data = dev_id; + + /* + * irqs should be disabled here, but as the irq is shared they are only + * guaranteed to be off if the timer irq is registered first. + */ + WARN_ON_ONCE(!irqs_disabled()); + + /* The PIT interrupt may be disabled, and is shared */ + if ((data->clkevt.mode == CLOCK_EVT_MODE_PERIODIC) && + (pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) { + unsigned nr_ticks; + + /* Get number of ticks performed before irq, and ack it */ + nr_ticks = PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); + do { + data->cnt += data->cycle; + data->clkevt.event_handler(&data->clkevt); + nr_ticks--; + } while (nr_ticks); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +/* + * Set up both clocksource and clockevent support. + */ +static void __init at91sam926x_pit_common_init(struct pit_data *data) +{ + unsigned long pit_rate; + unsigned bits; + int ret; + + /* + * Use our actual MCK to figure out how many MCK/16 ticks per + * 1/HZ period (instead of a compile-time constant LATCH). + */ + pit_rate = clk_get_rate(data->mck) / 16; + data->cycle = DIV_ROUND_CLOSEST(pit_rate, HZ); + WARN_ON(((data->cycle - 1) & ~AT91_PIT_PIV) != 0); + + /* Initialize and enable the timer */ + at91sam926x_pit_reset(data); + + /* + * Register clocksource. The high order bits of PIV are unused, + * so this isn't a 32-bit counter unless we get clockevent irqs. 
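+ * As a worked example with illustrative numbers (not from any particular + * board): MCK = 133 MHz gives pit_rate = 133000000 / 16 = 8312500 Hz, so at + * HZ = 100 the cycle value is DIV_ROUND_CLOSEST(8312500, 100) = 83125 and + * bits = 12 + ilog2(83125) = 12 + 16 = 28.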
+ */ + bits = 12 /* PICNT */ + ilog2(data->cycle) /* PIV */; + data->clksrc.mask = CLOCKSOURCE_MASK(bits); + data->clksrc.name = "pit"; + data->clksrc.rating = 175; + data->clksrc.read = read_pit_clk; + data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + clocksource_register_hz(&data->clksrc, pit_rate); + + /* Set up irq handler */ + ret = request_irq(data->irq, at91sam926x_pit_interrupt, + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, + "at91_tick", data); + if (ret) + panic(pr_fmt("Unable to setup IRQ\n")); + + /* Set up and register clockevents */ + data->clkevt.name = "pit"; + data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; + data->clkevt.shift = 32; + data->clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, data->clkevt.shift); + data->clkevt.rating = 100; + data->clkevt.cpumask = cpumask_of(0); + + data->clkevt.set_mode = pit_clkevt_mode; + data->clkevt.resume = at91sam926x_pit_resume; + data->clkevt.suspend = at91sam926x_pit_suspend; + clockevents_register_device(&data->clkevt); +} + +static void __init at91sam926x_pit_dt_init(struct device_node *node) +{ + struct pit_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + panic(pr_fmt("Unable to allocate memory\n")); + + data->base = of_iomap(node, 0); + if (!data->base) + panic(pr_fmt("Could not map PIT address\n")); + + data->mck = of_clk_get(node, 0); + if (IS_ERR(data->mck)) + /* Fallback on clkdev for !CCF-based boards */ + data->mck = clk_get(NULL, "mck"); + + if (IS_ERR(data->mck)) + panic(pr_fmt("Unable to get mck clk\n")); + + /* Get the interrupts property */ + data->irq = irq_of_parse_and_map(node, 0); + if (!data->irq) + panic(pr_fmt("Unable to get IRQ from DT\n")); + + at91sam926x_pit_common_init(data); +} +CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", + at91sam926x_pit_dt_init); + +static void __iomem *pit_base_addr; + +void __init at91sam926x_pit_init(int irq) +{ + struct pit_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + panic(pr_fmt("Unable to allocate memory\n")); + + data->base = pit_base_addr; + + data->mck = clk_get(NULL, "mck"); + if (IS_ERR(data->mck)) + panic(pr_fmt("Unable to get mck clk\n")); + + data->irq = irq; + + at91sam926x_pit_common_init(data); +} + +void __init at91sam926x_ioremap_pit(u32 addr) +{ + if (of_have_populated_dt()) + return; + + pit_base_addr = ioremap(addr, 16); + + if (!pit_base_addr) + panic(pr_fmt("Impossible to ioremap PIT\n")); +} diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index fd89ca982748..7072c2892d63 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -376,4 +376,13 @@ config EDAC_OCTEON_PCI Support for error detection and correction on the Cavium Octeon family of SOCs. +config EDAC_ALTERA_MC + tristate "Altera SDRAM Memory Controller EDAC" + depends on EDAC_MM_EDAC && ARCH_SOCFPGA + help + Support for error detection and correction on the + Altera SDRAM memory controller. Note that the + preloader must initialize the SDRAM before loading + the kernel.
+ endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index c479a24d8f77..359aa499b200 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -65,3 +65,5 @@ obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o + +obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c new file mode 100644 index 000000000000..3c4929fda9d5 --- /dev/null +++ b/drivers/edac/altera_edac.c @@ -0,0 +1,410 @@ +/* + * Copyright Altera Corporation (C) 2014. All rights reserved. + * Copyright 2011-2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * Adapted from the highbank_mc_edac driver. + */ + +#include <linux/ctype.h> +#include <linux/edac.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/types.h> +#include <linux/uaccess.h> + +#include "edac_core.h" +#include "edac_module.h" + +#define EDAC_MOD_STR "altera_edac" +#define EDAC_VERSION "1" + +/* SDRAM Controller CtrlCfg Register */ +#define CTLCFG_OFST 0x00 + +/* SDRAM Controller CtrlCfg Register Bit Masks */ +#define CTLCFG_ECC_EN 0x400 +#define CTLCFG_ECC_CORR_EN 0x800 +#define CTLCFG_GEN_SB_ERR 0x2000 +#define CTLCFG_GEN_DB_ERR 0x4000 + +#define CTLCFG_ECC_AUTO_EN (CTLCFG_ECC_EN | \ + CTLCFG_ECC_CORR_EN) + +/* SDRAM Controller Address Width Register */ +#define DRAMADDRW_OFST 0x2C + +/* SDRAM Controller Address Widths Field Register */ +#define DRAMADDRW_COLBIT_MASK 0x001F +#define DRAMADDRW_COLBIT_SHIFT 0 +#define DRAMADDRW_ROWBIT_MASK 0x03E0 +#define DRAMADDRW_ROWBIT_SHIFT 5 +#define DRAMADDRW_BANKBIT_MASK 0x1C00 +#define DRAMADDRW_BANKBIT_SHIFT 10 +#define DRAMADDRW_CSBIT_MASK 0xE000 +#define DRAMADDRW_CSBIT_SHIFT 13 + +/* SDRAM Controller Interface Data Width Register */ +#define DRAMIFWIDTH_OFST 0x30 + +/* SDRAM Controller Interface Data Width Defines */ +#define DRAMIFWIDTH_16B_ECC 24 +#define DRAMIFWIDTH_32B_ECC 40 + +/* SDRAM Controller DRAM Status Register */ +#define DRAMSTS_OFST 0x38 + +/* SDRAM Controller DRAM Status Register Bit Masks */ +#define DRAMSTS_SBEERR 0x04 +#define DRAMSTS_DBEERR 0x08 +#define DRAMSTS_CORR_DROP 0x10 + +/* SDRAM Controller DRAM IRQ Register */ +#define DRAMINTR_OFST 0x3C + +/* SDRAM Controller DRAM IRQ Register Bit Masks */ +#define DRAMINTR_INTREN 0x01 +#define DRAMINTR_SBEMASK 0x02 +#define DRAMINTR_DBEMASK 0x04 +#define DRAMINTR_CORRDROPMASK 0x08 +#define DRAMINTR_INTRCLR 0x10 + +/* SDRAM Controller Single Bit Error Count Register */ +#define SBECOUNT_OFST 0x40 + +/* SDRAM Controller Single Bit Error Count Register Bit Masks */ +#define SBECOUNT_MASK 0x0F + +/* SDRAM Controller Double Bit Error Count Register */ +#define DBECOUNT_OFST 0x44 + +/* SDRAM Controller 
Double Bit Error Count Register Bit Masks */ +#define DBECOUNT_MASK 0x0F + +/* SDRAM Controller ECC Error Address Register */ +#define ERRADDR_OFST 0x48 + +/* SDRAM Controller ECC Error Address Register Bit Masks */ +#define ERRADDR_MASK 0xFFFFFFFF + +/* Altera SDRAM Memory Controller data */ +struct altr_sdram_mc_data { + struct regmap *mc_vbase; +}; + +static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + struct altr_sdram_mc_data *drvdata = mci->pvt_info; + u32 status, err_count, err_addr; + + /* Error Address is shared by both SBE & DBE */ + regmap_read(drvdata->mc_vbase, ERRADDR_OFST, &err_addr); + + regmap_read(drvdata->mc_vbase, DRAMSTS_OFST, &status); + + if (status & DRAMSTS_DBEERR) { + regmap_read(drvdata->mc_vbase, DBECOUNT_OFST, &err_count); + panic("\nEDAC: [%d Uncorrectable errors @ 0x%08X]\n", + err_count, err_addr); + } + if (status & DRAMSTS_SBEERR) { + regmap_read(drvdata->mc_vbase, SBECOUNT_OFST, &err_count); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, + err_addr >> PAGE_SHIFT, + err_addr & ~PAGE_MASK, 0, + 0, 0, -1, mci->ctl_name, ""); + } + + regmap_write(drvdata->mc_vbase, DRAMINTR_OFST, + (DRAMINTR_INTRCLR | DRAMINTR_INTREN)); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_EDAC_DEBUG +static ssize_t altr_sdr_mc_err_inject_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct mem_ctl_info *mci = file->private_data; + struct altr_sdram_mc_data *drvdata = mci->pvt_info; + u32 *ptemp; + dma_addr_t dma_handle; + u32 reg, read_reg; + + ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL); + if (!ptemp) { + edac_printk(KERN_ERR, EDAC_MC, + "Inject: Buffer Allocation error\n"); + return -ENOMEM; + } + + regmap_read(drvdata->mc_vbase, CTLCFG_OFST, &read_reg); + read_reg &= ~(CTLCFG_GEN_SB_ERR | CTLCFG_GEN_DB_ERR); + + /* Errors are injected by writing a word while the SBE or DBE + * bit in the CTLCFG register is set. Reading the word will + * trigger the SBE or DBE error and the corresponding IRQ. + */ + if (count == 3) { + edac_printk(KERN_ALERT, EDAC_MC, + "Inject Double bit error\n"); + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, + (read_reg | CTLCFG_GEN_DB_ERR)); + } else { + edac_printk(KERN_ALERT, EDAC_MC, + "Inject Single bit error\n"); + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, + (read_reg | CTLCFG_GEN_SB_ERR)); + } + + ptemp[0] = 0x5A5A5A5A; + ptemp[1] = 0xA5A5A5A5; + + /* Clear the error injection bits */ + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, read_reg); + /* Ensure it has been written out */ + wmb(); + + /* + * To trigger the error, we need to read the data back + * (the data was written with errors above). + * The ACCESS_ONCE macros and printk are used to prevent + * the compiler optimizing these reads out.
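+ * (Per the count check above, a 3-byte write to the debugfs file, e.g. two + * characters plus a newline, selects double-bit injection; a write of any + * other length injects a single-bit error.)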
+ */ + reg = ACCESS_ONCE(ptemp[0]); + read_reg = ACCESS_ONCE(ptemp[1]); + /* Force Read */ + rmb(); + + edac_printk(KERN_ALERT, EDAC_MC, "Read Data [0x%X, 0x%X]\n", + reg, read_reg); + + dma_free_coherent(mci->pdev, 16, ptemp, dma_handle); + + return count; +} + +static const struct file_operations altr_sdr_mc_debug_inject_fops = { + .open = simple_open, + .write = altr_sdr_mc_err_inject_write, + .llseek = generic_file_llseek, +}; + +static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{ + if (mci->debugfs) + debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, + &altr_sdr_mc_debug_inject_fops); +} +#else +static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{} +#endif + +/* Get total memory size in bytes */ +static u32 altr_sdram_get_total_mem_size(struct regmap *mc_vbase) +{ + u32 size, read_reg, row, bank, col, cs, width; + + if (regmap_read(mc_vbase, DRAMADDRW_OFST, &read_reg) < 0) + return 0; + + if (regmap_read(mc_vbase, DRAMIFWIDTH_OFST, &width) < 0) + return 0; + + col = (read_reg & DRAMADDRW_COLBIT_MASK) >> + DRAMADDRW_COLBIT_SHIFT; + row = (read_reg & DRAMADDRW_ROWBIT_MASK) >> + DRAMADDRW_ROWBIT_SHIFT; + bank = (read_reg & DRAMADDRW_BANKBIT_MASK) >> + DRAMADDRW_BANKBIT_SHIFT; + cs = (read_reg & DRAMADDRW_CSBIT_MASK) >> + DRAMADDRW_CSBIT_SHIFT; + + /* Correct for ECC as it's not addressable */ + if (width == DRAMIFWIDTH_32B_ECC) + width = 32; + if (width == DRAMIFWIDTH_16B_ECC) + width = 16; + + /* calculate the SDRAM size based on this info */ + size = 1 << (row + bank + col); + size = size * cs * (width / 8); + return size; +} + +static int altr_sdram_probe(struct platform_device *pdev) +{ + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct altr_sdram_mc_data *drvdata; + struct regmap *mc_vbase; + struct dimm_info *dimm; + u32 read_reg, mem_size; + int irq; + int res = 0; + + /* Validate the SDRAM controller has ECC enabled */ + /* Grab the register range from the sdr controller in device tree */ + mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "altr,sdr-syscon"); + if (IS_ERR(mc_vbase)) { + edac_printk(KERN_ERR, EDAC_MC, + "regmap for altr,sdr-syscon lookup failed.\n"); + return -ENODEV; + } + + if (regmap_read(mc_vbase, CTLCFG_OFST, &read_reg) || + ((read_reg & CTLCFG_ECC_AUTO_EN) != CTLCFG_ECC_AUTO_EN)) { + edac_printk(KERN_ERR, EDAC_MC, + "No ECC/ECC disabled [0x%08X]\n", read_reg); + return -ENODEV; + } + + /* Calculate the memory size from the SDRAM controller registers.
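+ * For example (illustrative values, not from a specific board): col = 10, + * row = 15, bank = 3 and cs = 1 on a 32-bit data path give + * (1 << (15 + 3 + 10)) * 1 * (32 / 8) = 1 GiB.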
*/ + mem_size = altr_sdram_get_total_mem_size(mc_vbase); + if (!mem_size) { + edac_printk(KERN_ERR, EDAC_MC, + "Unable to calculate memory size\n"); + return -ENODEV; + } + + /* Ensure the SDRAM Interrupt is disabled and cleared */ + if (regmap_write(mc_vbase, DRAMINTR_OFST, DRAMINTR_INTRCLR)) { + edac_printk(KERN_ERR, EDAC_MC, + "Error clearing SDRAM ECC IRQ\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + edac_printk(KERN_ERR, EDAC_MC, + "No irq %d in DT\n", irq); + return -ENODEV; + } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = 1; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = 1; + layers[1].is_virt_csrow = false; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, + sizeof(struct altr_sdram_mc_data)); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + drvdata = mci->pvt_info; + drvdata->mc_vbase = mc_vbase; + platform_set_drvdata(pdev, mci); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + res = -ENOMEM; + goto free; + } + + mci->mtype_cap = MEM_FLAG_DDR3; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = EDAC_MOD_STR; + mci->mod_ver = EDAC_VERSION; + mci->ctl_name = dev_name(&pdev->dev); + mci->scrub_mode = SCRUB_SW_SRC; + mci->dev_name = dev_name(&pdev->dev); + + dimm = *mci->dimms; + dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1; + dimm->grain = 8; + dimm->dtype = DEV_X8; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + + res = edac_mc_add_mc(mci); + if (res < 0) + goto err; + + res = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler, + 0, dev_name(&pdev->dev), mci); + if (res < 0) { + edac_mc_printk(mci, KERN_ERR, + "Unable to request irq %d\n", irq); + res = -ENODEV; + goto err2; + } + + if (regmap_write(drvdata->mc_vbase, DRAMINTR_OFST, + (DRAMINTR_INTRCLR | DRAMINTR_INTREN))) { + edac_mc_printk(mci, KERN_ERR, + "Error enabling SDRAM ECC IRQ\n"); + res = -ENODEV; + goto err2; + } + + altr_sdr_mc_create_debugfs_nodes(mci); + + devres_close_group(&pdev->dev, NULL); + + return 0; + +err2: + edac_mc_del_mc(&pdev->dev); +err: + devres_release_group(&pdev->dev, NULL); +free: + edac_mc_free(mci); + edac_printk(KERN_ERR, EDAC_MC, + "EDAC Probe Failed; Error %d\n", res); + + return res; +} + +static int altr_sdram_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id altr_sdram_ctrl_of_match[] = { + { .compatible = "altr,sdram-edac", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match); + +static struct platform_driver altr_sdram_edac_driver = { + .probe = altr_sdram_probe, + .remove = altr_sdram_remove, + .driver = { + .name = "altr_sdram_edac", + .of_match_table = altr_sdram_ctrl_of_match, + }, +}; + +module_platform_driver(altr_sdram_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thor Thayer"); +MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller"); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index a31a9e40eed9..78d4ff551590 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -75,6 +75,11 @@ config OR1K_PIC bool select IRQ_DOMAIN +config OMAP_IRQCHIP + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + config ORION_IRQCHIP bool select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 
73052ba9ca62..d0a2613c73bc 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o +obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c new file mode 100644 index 000000000000..f3814e79192d --- /dev/null +++ b/drivers/irqchip/irq-omap-intc.c @@ -0,0 +1,403 @@ +/* + * linux/arch/arm/mach-omap2/irq.c + * + * Interrupt handler for OMAP2 boards. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Paul Mundt <paul.mundt@nokia.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#include <asm/exception.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include "irqchip.h" + +/* Define these here for now until we drop all board-files */ +#define OMAP24XX_IC_BASE 0x480fe000 +#define OMAP34XX_IC_BASE 0x48200000 + +/* selected INTC register offsets */ + +#define INTC_REVISION 0x0000 +#define INTC_SYSCONFIG 0x0010 +#define INTC_SYSSTATUS 0x0014 +#define INTC_SIR 0x0040 +#define INTC_CONTROL 0x0048 +#define INTC_PROTECTION 0x004C +#define INTC_IDLE 0x0050 +#define INTC_THRESHOLD 0x0068 +#define INTC_MIR0 0x0084 +#define INTC_MIR_CLEAR0 0x0088 +#define INTC_MIR_SET0 0x008c +#define INTC_PENDING_IRQ0 0x0098 +#define INTC_PENDING_IRQ1 0x00b8 +#define INTC_PENDING_IRQ2 0x00d8 +#define INTC_PENDING_IRQ3 0x00f8 +#define INTC_ILR0 0x0100 + +#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */ +#define INTCPS_NR_ILR_REGS 128 +#define INTCPS_NR_MIR_REGS 4 + +#define INTC_IDLE_FUNCIDLE (1 << 0) +#define INTC_IDLE_TURBO (1 << 1) + +#define INTC_PROTECTION_ENABLE (1 << 0) + +struct omap_intc_regs { + u32 sysconfig; + u32 protection; + u32 idle; + u32 threshold; + u32 ilr[INTCPS_NR_ILR_REGS]; + u32 mir[INTCPS_NR_MIR_REGS]; +}; +static struct omap_intc_regs intc_context; + +static struct irq_domain *domain; +static void __iomem *omap_irq_base; +static int omap_nr_pending = 3; +static int omap_nr_irqs = 96; + +static void intc_writel(u32 reg, u32 val) +{ + writel_relaxed(val, omap_irq_base + reg); +} + +static u32 intc_readl(u32 reg) +{ + return readl_relaxed(omap_irq_base + reg); +} + +void omap_intc_save_context(void) +{ + int i; + + intc_context.sysconfig = + intc_readl(INTC_SYSCONFIG); + intc_context.protection = + intc_readl(INTC_PROTECTION); + intc_context.idle = + intc_readl(INTC_IDLE); + intc_context.threshold = + intc_readl(INTC_THRESHOLD); + + for (i = 0; i < omap_nr_irqs; i++) + intc_context.ilr[i] = + intc_readl((INTC_ILR0 + 0x4 * i)); + for (i = 0; i < INTCPS_NR_MIR_REGS; i++) + intc_context.mir[i] = + intc_readl(INTC_MIR0 + (0x20 * i)); +} + +void omap_intc_restore_context(void) +{ + int i; + + intc_writel(INTC_SYSCONFIG, intc_context.sysconfig); + intc_writel(INTC_PROTECTION, intc_context.protection); + intc_writel(INTC_IDLE, intc_context.idle); + intc_writel(INTC_THRESHOLD, intc_context.threshold); + + for (i = 0; i < omap_nr_irqs; i++) + 
intc_writel(INTC_ILR0 + 0x4 * i, + intc_context.ilr[i]); + + for (i = 0; i < INTCPS_NR_MIR_REGS; i++) + intc_writel(INTC_MIR0 + 0x20 * i, + intc_context.mir[i]); + /* MIRs are saved and restore with other PRCM registers */ +} + +void omap3_intc_prepare_idle(void) +{ + /* + * Disable autoidle as it can stall interrupt controller, + * cf. errata ID i540 for 3430 (all revisions up to 3.1.x) + */ + intc_writel(INTC_SYSCONFIG, 0); + intc_writel(INTC_IDLE, INTC_IDLE_TURBO); +} + +void omap3_intc_resume_idle(void) +{ + /* Re-enable autoidle */ + intc_writel(INTC_SYSCONFIG, 1); + intc_writel(INTC_IDLE, 0); +} + +/* XXX: FIQ and additional INTC support (only MPU at the moment) */ +static void omap_ack_irq(struct irq_data *d) +{ + intc_writel(INTC_CONTROL, 0x1); +} + +static void omap_mask_ack_irq(struct irq_data *d) +{ + irq_gc_mask_disable_reg(d); + omap_ack_irq(d); +} + +static void __init omap_irq_soft_reset(void) +{ + unsigned long tmp; + + tmp = intc_readl(INTC_REVISION) & 0xff; + + pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n", + omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs); + + tmp = intc_readl(INTC_SYSCONFIG); + tmp |= 1 << 1; /* soft reset */ + intc_writel(INTC_SYSCONFIG, tmp); + + while (!(intc_readl(INTC_SYSSTATUS) & 0x1)) + /* Wait for reset to complete */; + + /* Enable autoidle */ + intc_writel(INTC_SYSCONFIG, 1 << 0); +} + +int omap_irq_pending(void) +{ + int i; + + for (i = 0; i < omap_nr_pending; i++) + if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i))) + return 1; + return 0; +} + +void omap3_intc_suspend(void) +{ + /* A pending interrupt would prevent OMAP from entering suspend */ + omap_ack_irq(NULL); +} + +static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base) +{ + int ret; + int i; + + ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC", + handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE, + IRQ_LEVEL, 0); + if (ret) { + pr_warn("Failed to allocate irq chips\n"); + return ret; + } + + for (i = 0; i < omap_nr_pending; i++) { + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + + gc = irq_get_domain_generic_chip(d, 32 * i); + gc->reg_base = base; + ct = gc->chip_types; + + ct->type = IRQ_TYPE_LEVEL_MASK; + ct->handler = handle_level_irq; + + ct->chip.irq_ack = omap_mask_ack_irq; + ct->chip.irq_mask = irq_gc_mask_disable_reg; + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; + + ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; + + ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i; + ct->regs.disable = INTC_MIR_SET0 + 32 * i; + } + + return 0; +} + +static void __init omap_alloc_gc_legacy(void __iomem *base, + unsigned int irq_start, unsigned int num) +{ + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + + gc = irq_alloc_generic_chip("INTC", 1, irq_start, base, + handle_level_irq); + ct = gc->chip_types; + ct->chip.irq_ack = omap_mask_ack_irq; + ct->chip.irq_mask = irq_gc_mask_disable_reg; + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; + ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; + + ct->regs.enable = INTC_MIR_CLEAR0; + ct->regs.disable = INTC_MIR_SET0; + irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, + IRQ_NOREQUEST | IRQ_NOPROBE, 0); +} + +static int __init omap_init_irq_of(struct device_node *node) +{ + int ret; + + omap_irq_base = of_iomap(node, 0); + if (WARN_ON(!omap_irq_base)) + return -ENOMEM; + + domain = irq_domain_add_linear(node, omap_nr_irqs, + &irq_generic_chip_ops, NULL); + + omap_irq_soft_reset(); + + ret = omap_alloc_gc_of(domain, omap_irq_base); + if (ret < 0) + irq_domain_remove(domain); 
+ + return ret; +} + +static int __init omap_init_irq_legacy(u32 base) +{ + int j, irq_base; + + omap_irq_base = ioremap(base, SZ_4K); + if (WARN_ON(!omap_irq_base)) + return -ENOMEM; + + irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0); + if (irq_base < 0) { + pr_warn("Couldn't allocate IRQ numbers\n"); + irq_base = 0; + } + + domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0, + &irq_domain_simple_ops, NULL); + + omap_irq_soft_reset(); + + for (j = 0; j < omap_nr_irqs; j += 32) + omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32); + + return 0; +} + +static void __init omap_irq_enable_protection(void) +{ + u32 reg; + + reg = intc_readl(INTC_PROTECTION); + reg |= INTC_PROTECTION_ENABLE; + intc_writel(INTC_PROTECTION, reg); +} + +static int __init omap_init_irq(u32 base, struct device_node *node) +{ + int ret; + + if (node) + ret = omap_init_irq_of(node); + else + ret = omap_init_irq_legacy(base); + + if (ret == 0) + omap_irq_enable_protection(); + + return ret; +} + +static asmlinkage void __exception_irq_entry +omap_intc_handle_irq(struct pt_regs *regs) +{ + u32 irqnr = 0; + int handled_irq = 0; + int i; + + do { + for (i = 0; i < omap_nr_pending; i++) { + irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)); + if (irqnr) + goto out; + } + +out: + if (!irqnr) + break; + + irqnr = intc_readl(INTC_SIR); + irqnr &= ACTIVEIRQ_MASK; + + if (irqnr) { + irqnr = irq_find_mapping(domain, irqnr); + handle_IRQ(irqnr, regs); + handled_irq = 1; + } + } while (irqnr); + + /* + * If an irq is masked or deasserted while active, we will + * keep ending up here with no irq handled. So remove it from + * the INTC with an ack. + */ + if (!handled_irq) + omap_ack_irq(NULL); +} + +void __init omap2_init_irq(void) +{ + omap_nr_irqs = 96; + omap_nr_pending = 3; + omap_init_irq(OMAP24XX_IC_BASE, NULL); + set_handle_irq(omap_intc_handle_irq); +} + +void __init omap3_init_irq(void) +{ + omap_nr_irqs = 96; + omap_nr_pending = 3; + omap_init_irq(OMAP34XX_IC_BASE, NULL); + set_handle_irq(omap_intc_handle_irq); +} + +void __init ti81xx_init_irq(void) +{ + omap_nr_irqs = 96; + omap_nr_pending = 4; + omap_init_irq(OMAP34XX_IC_BASE, NULL); + set_handle_irq(omap_intc_handle_irq); +} + +static int __init intc_of_init(struct device_node *node, + struct device_node *parent) +{ + int ret; + + omap_nr_pending = 3; + omap_nr_irqs = 96; + + if (WARN_ON(!node)) + return -ENODEV; + + if (of_device_is_compatible(node, "ti,am33xx-intc")) { + omap_nr_irqs = 128; + omap_nr_pending = 4; + } + + ret = omap_init_irq(-1, of_node_get(node)); + if (ret < 0) + return ret; + + set_handle_irq(omap_intc_handle_irq); + + return 0; +} + +IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init); +IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init); +IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index f6ef7bb2dc11..90e108f9e22e 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -478,6 +478,16 @@ config LEDS_BLINKM This option enables support for the BlinkM RGB LED connected through I2C. Say Y to enable support for the BlinkM LED. +config LEDS_SYSCON + bool "LED support for LEDs on system controllers" + depends on LEDS_CLASS=y + depends on MFD_SYSCON + depends on OF + help + This option enabled support for the LEDs on syscon type + devices. This will only work with device tree enabled + devices. 
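+ The LEDs are described in the device tree as "register-bit-led" + children of the syscon node, each with an "offset" and "mask" + property (see leds-syscon.c below).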
+ config LEDS_VERSATILE tristate "LED support for the ARM Versatile and RealView" depends on ARCH_REALVIEW || ARCH_VERSATILE diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index d8cc5f2777de..822dd83ef97a 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o +obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o # LED SPI Drivers diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c new file mode 100644 index 000000000000..3afec79c43f4 --- /dev/null +++ b/drivers/leds/leds-syscon.c @@ -0,0 +1,166 @@ +/* + * Generic Syscon LEDs Driver + * + * Copyright (c) 2014, Linaro Limited + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA
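+ * + * This driver registers LED class devices for LEDs wired to single bits + * in a system controller (syscon) register, described in the device tree + * as "register-bit-led" children of the syscon node.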
+ */ +#include <linux/io.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/stat.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/leds.h> + +/** + * struct syscon_led - state container for syscon based LEDs + * @cdev: LED class device for this LED + * @map: regmap to access the syscon device backing this LED + * @offset: the offset into the syscon regmap for the LED register + * @mask: the bit in the register corresponding to the LED + * @state: current state of the LED + */ +struct syscon_led { + struct led_classdev cdev; + struct regmap *map; + u32 offset; + u32 mask; + bool state; +}; + +static void syscon_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct syscon_led *sled = + container_of(led_cdev, struct syscon_led, cdev); + u32 val; + int ret; + + if (value == LED_OFF) { + val = 0; + sled->state = false; + } else { + val = sled->mask; + sled->state = true; + } + + ret = regmap_update_bits(sled->map, sled->offset, sled->mask, val); + if (ret < 0) + dev_err(sled->cdev.dev, "error updating LED status\n"); +} + +static const struct of_device_id syscon_match[] = { + { .compatible = "syscon", }, + {}, +}; + +static int __init syscon_leds_init(void) +{ + const struct of_device_id *devid; + struct device_node *np; + struct device_node *child; + struct regmap *map; + struct platform_device *pdev; + struct device *dev; + int ret; + + np = of_find_matching_node_and_match(NULL, syscon_match, + &devid); + if (!np) + return -ENODEV; + + map = syscon_node_to_regmap(np); + if (IS_ERR(map)) + return PTR_ERR(map); + + /* + * If the map is there, the device should be there, we allocate + * memory on the syscon device's behalf here. + */ + pdev = of_find_device_by_node(np); + if (!pdev) + return -ENODEV; + dev = &pdev->dev; + + for_each_available_child_of_node(np, child) { + struct syscon_led *sled; + const char *state; + + /* Only check for register-bit-leds */ + if (of_property_match_string(child, "compatible", + "register-bit-led") < 0) + continue; + + sled = devm_kzalloc(dev, sizeof(*sled), GFP_KERNEL); + if (!sled) + return -ENOMEM; + + sled->map = map; + + if (of_property_read_u32(child, "offset", &sled->offset)) + return -EINVAL; + if (of_property_read_u32(child, "mask", &sled->mask)) + return -EINVAL; + sled->cdev.name = + of_get_property(child, "label", NULL) ? 
: child->name; + sled->cdev.default_trigger = + of_get_property(child, "linux,default-trigger", NULL); + + state = of_get_property(child, "default-state", NULL); + if (state) { + if (!strcmp(state, "keep")) { + u32 val; + + ret = regmap_read(map, sled->offset, &val); + if (ret < 0) + return ret; + sled->state = !!(val & sled->mask); + } else if (!strcmp(state, "on")) { + sled->state = true; + ret = regmap_update_bits(map, sled->offset, + sled->mask, + sled->mask); + if (ret < 0) + return ret; + } else { + sled->state = false; + ret = regmap_update_bits(map, sled->offset, + sled->mask, 0); + if (ret < 0) + return ret; + } + + } + sled->cdev.brightness_set = syscon_led_set; + + ret = led_classdev_register(dev, &sled->cdev); + if (ret < 0) + return ret; + + dev_info(dev, "registered LED %s\n", sled->cdev.name); + } + + return 0; +} +device_initcall(syscon_leds_init); diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index a27e00e63a8a..bcc7ee129276 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -31,6 +31,7 @@ #include <linux/err.h> #include <linux/notifier.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/platform_data/mailbox-omap.h> @@ -94,6 +95,18 @@ struct omap_mbox_device { struct list_head elem; }; +struct omap_mbox_fifo_info { + int tx_id; + int tx_usr; + int tx_irq; + + int rx_id; + int rx_usr; + int rx_irq; + + const char *name; +}; + struct omap_mbox { const char *name; int irq; @@ -587,24 +600,118 @@ static int omap_mbox_unregister(struct omap_mbox_device *mdev) return 0; } +static const struct of_device_id omap_mailbox_of_match[] = { + { + .compatible = "ti,omap2-mailbox", + .data = (void *)MBOX_INTR_CFG_TYPE1, + }, + { + .compatible = "ti,omap3-mailbox", + .data = (void *)MBOX_INTR_CFG_TYPE1, + }, + { + .compatible = "ti,omap4-mailbox", + .data = (void *)MBOX_INTR_CFG_TYPE2, + }, + { + /* end */ + }, +}; +MODULE_DEVICE_TABLE(of, omap_mailbox_of_match); + static int omap_mbox_probe(struct platform_device *pdev) { struct resource *mem; int ret; struct omap_mbox **list, *mbox, *mboxblk; struct omap_mbox_pdata *pdata = pdev->dev.platform_data; - struct omap_mbox_dev_info *info; + struct omap_mbox_dev_info *info = NULL; + struct omap_mbox_fifo_info *finfo, *finfoblk; struct omap_mbox_device *mdev; struct omap_mbox_fifo *fifo; - u32 intr_type; + struct device_node *node = pdev->dev.of_node; + struct device_node *child; + const struct of_device_id *match; + u32 intr_type, info_count; + u32 num_users, num_fifos; + u32 tmp[3]; u32 l; int i; - if (!pdata || !pdata->info_cnt || !pdata->info) { + if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) { pr_err("%s: platform not supported\n", __func__); return -ENODEV; } + if (node) { + match = of_match_device(omap_mailbox_of_match, &pdev->dev); + if (!match) + return -ENODEV; + intr_type = (u32)match->data; + + if (of_property_read_u32(node, "ti,mbox-num-users", + &num_users)) + return -ENODEV; + + if (of_property_read_u32(node, "ti,mbox-num-fifos", + &num_fifos)) + return -ENODEV; + + info_count = of_get_available_child_count(node); + if (!info_count) { + dev_err(&pdev->dev, "no available mbox devices found\n"); + return -ENODEV; + } + } else { /* non-DT device creation */ + info_count = pdata->info_cnt; + info = pdata->info; + intr_type = pdata->intr_type; + num_users = pdata->num_users; + num_fifos = pdata->num_fifos; + } + + finfoblk = devm_kzalloc(&pdev->dev, info_count * 
sizeof(*finfoblk), + GFP_KERNEL); + if (!finfoblk) + return -ENOMEM; + + finfo = finfoblk; + child = NULL; + for (i = 0; i < info_count; i++, finfo++) { + if (node) { + child = of_get_next_available_child(node, child); + ret = of_property_read_u32_array(child, "ti,mbox-tx", + tmp, ARRAY_SIZE(tmp)); + if (ret) + return ret; + finfo->tx_id = tmp[0]; + finfo->tx_irq = tmp[1]; + finfo->tx_usr = tmp[2]; + + ret = of_property_read_u32_array(child, "ti,mbox-rx", + tmp, ARRAY_SIZE(tmp)); + if (ret) + return ret; + finfo->rx_id = tmp[0]; + finfo->rx_irq = tmp[1]; + finfo->rx_usr = tmp[2]; + + finfo->name = child->name; + } else { + finfo->tx_id = info->tx_id; + finfo->rx_id = info->rx_id; + finfo->tx_usr = info->usr_id; + finfo->tx_irq = info->irq_id; + finfo->rx_usr = info->usr_id; + finfo->rx_irq = info->irq_id; + finfo->name = info->name; + info++; + } + if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos || + finfo->tx_usr >= num_users || finfo->rx_usr >= num_users) + return -EINVAL; + } + mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; @@ -615,41 +722,40 @@ static int omap_mbox_probe(struct platform_device *pdev) return PTR_ERR(mdev->mbox_base); /* allocate one extra for marking end of list */ - list = devm_kzalloc(&pdev->dev, (pdata->info_cnt + 1) * sizeof(*list), + list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list), GFP_KERNEL); if (!list) return -ENOMEM; - mboxblk = devm_kzalloc(&pdev->dev, pdata->info_cnt * sizeof(*mbox), + mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox), GFP_KERNEL); if (!mboxblk) return -ENOMEM; - info = pdata->info; - intr_type = pdata->intr_type; mbox = mboxblk; - for (i = 0; i < pdata->info_cnt; i++, info++) { + finfo = finfoblk; + for (i = 0; i < info_count; i++, finfo++) { fifo = &mbox->tx_fifo; - fifo->msg = MAILBOX_MESSAGE(info->tx_id); - fifo->fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id); - fifo->intr_bit = MAILBOX_IRQ_NOTFULL(info->tx_id); - fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id); - fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id); - fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id); + fifo->msg = MAILBOX_MESSAGE(finfo->tx_id); + fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id); + fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr); fifo = &mbox->rx_fifo; - fifo->msg = MAILBOX_MESSAGE(info->rx_id); - fifo->msg_stat = MAILBOX_MSGSTATUS(info->rx_id); - fifo->intr_bit = MAILBOX_IRQ_NEWMSG(info->rx_id); - fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id); - fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id); - fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id); + fifo->msg = MAILBOX_MESSAGE(finfo->rx_id); + fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id); + fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr); mbox->intr_type = intr_type; mbox->parent = mdev; - mbox->name = info->name; - mbox->irq = platform_get_irq(pdev, info->irq_id); + mbox->name = finfo->name; + mbox->irq = platform_get_irq(pdev, finfo->tx_irq); if (mbox->irq < 0) return mbox->irq; list[i] = mbox++; @@ -657,8 +763,8 @@ static int omap_mbox_probe(struct 
platform_device *pdev) mutex_init(&mdev->cfg_lock); mdev->dev = &pdev->dev; - mdev->num_users = pdata->num_users; - mdev->num_fifos = pdata->num_fifos; + mdev->num_users = num_users; + mdev->num_fifos = num_fifos; mdev->mboxes = list; ret = omap_mbox_register(mdev); if (ret) @@ -684,6 +790,7 @@ static int omap_mbox_probe(struct platform_device *pdev) if (ret < 0) goto unregister; + devm_kfree(&pdev->dev, finfoblk); return 0; unregister: @@ -708,6 +815,7 @@ static struct platform_driver omap_mbox_driver = { .driver = { .name = "omap-mailbox", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(omap_mailbox_of_match), }, }; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index fab81a143bd7..6d91c27fd4c8 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -7,6 +7,16 @@ menuconfig MEMORY if MEMORY +config ATMEL_SDRAMC + bool "Atmel (Multi-port DDR-)SDRAM Controller" + default y + depends on ARCH_AT91 && OF + help + This driver is for Atmel SDRAM Controller or Atmel Multi-port + DDR-SDRAM Controller available on Atmel AT91SAM9 and SAMA5 SoCs. + Starting with the at91sam9g45, this controller supports SDR, DDR and + LP-DDR memories. + config TI_AEMIF tristate "Texas Instruments AEMIF driver" depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index 4055c47f45ab..c32d31981be3 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -5,6 +5,7 @@ ifeq ($(CONFIG_DDR),y) obj-$(CONFIG_OF) += of_memory.o endif +obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o obj-$(CONFIG_TI_AEMIF) += ti-aemif.o obj-$(CONFIG_TI_EMIF) += emif.o obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c new file mode 100644 index 000000000000..fed04e8efe75 --- /dev/null +++ b/drivers/memory/atmel-sdramc.c @@ -0,0 +1,98 @@ +/* + * Atmel (Multi-port DDR-)SDRAM Controller driver + * + * Copyright (C) 2014 Atmel + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
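+ * + * At probe time this driver only claims and enables the RAM controller + * clocks ("ddrck", plus "mpddr" when present) so the clock framework never + * switches them off; this replaces the CLK_IGNORE_UNUSED workaround removed + * from drivers/clk/at91/clk-system.c earlier in this series.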
+ * + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +struct at91_ramc_caps { + bool has_ddrck; + bool has_mpddr_clk; +}; + +static const struct at91_ramc_caps at91rm9200_caps = { }; + +static const struct at91_ramc_caps at91sam9g45_caps = { + .has_ddrck = 1, + .has_mpddr_clk = 0, +}; + +static const struct at91_ramc_caps sama5d3_caps = { + .has_ddrck = 1, + .has_mpddr_clk = 1, +}; + +static const struct of_device_id atmel_ramc_of_match[] = { + { .compatible = "atmel,at91rm9200-sdramc", .data = &at91rm9200_caps, }, + { .compatible = "atmel,at91sam9260-sdramc", .data = &at91rm9200_caps, }, + { .compatible = "atmel,at91sam9g45-ddramc", .data = &at91sam9g45_caps, }, + { .compatible = "atmel,sama5d3-ddramc", .data = &sama5d3_caps, }, + {}, +}; +MODULE_DEVICE_TABLE(of, atmel_ramc_of_match); + +static int atmel_ramc_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct at91_ramc_caps *caps; + struct clk *clk; + + match = of_match_device(atmel_ramc_of_match, &pdev->dev); + caps = match->data; + + if (caps->has_ddrck) { + clk = devm_clk_get(&pdev->dev, "ddrck"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + clk_prepare_enable(clk); + } + + if (caps->has_mpddr_clk) { + clk = devm_clk_get(&pdev->dev, "mpddr"); + if (IS_ERR(clk)) { + pr_err("AT91 RAMC: couldn't get mpddr clock\n"); + return PTR_ERR(clk); + } + clk_prepare_enable(clk); + } + + return 0; +} + +static struct platform_driver atmel_ramc_driver = { + .probe = atmel_ramc_probe, + .driver = { + .name = "atmel-ramc", + .owner = THIS_MODULE, + .of_match_table = atmel_ramc_of_match, + }, +}; + +static int __init atmel_ramc_init(void) +{ + return platform_driver_register(&atmel_ramc_driver); +} +module_init(atmel_ramc_init); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>"); +MODULE_DESCRIPTION("Atmel (Multi-port DDR-)SDRAM Controller"); diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index ca41523bbebf..527a0f47ef44 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -6,15 +6,33 @@ menuconfig POWER_RESET Say Y here to enable board reset and power off +if POWER_RESET + config POWER_RESET_AS3722 bool "ams AS3722 power-off driver" - depends on MFD_AS3722 && POWER_RESET + depends on MFD_AS3722 help This driver supports turning off board via a ams AS3722 power-off. +config POWER_RESET_AT91_POWEROFF + bool "Atmel AT91 poweroff driver" + depends on ARCH_AT91 + default SOC_AT91SAM9 || SOC_SAMA5 + help + This driver supports poweroff for Atmel AT91SAM9 and SAMA5 + SoCs + +config POWER_RESET_AT91_RESET + bool "Atmel AT91 reset driver" + depends on ARCH_AT91 + default SOC_AT91SAM9 || SOC_SAMA5 + help + This driver supports restart for Atmel AT91SAM9 and SAMA5 + SoCs + config POWER_RESET_AXXIA bool "LSI Axxia reset driver" - depends on POWER_RESET && ARCH_AXXIA + depends on ARCH_AXXIA help This driver supports restart for Axxia SoC. @@ -33,7 +51,7 @@ config POWER_RESET_BRCMSTB config POWER_RESET_GPIO bool "GPIO power-off driver" - depends on OF_GPIO && POWER_RESET + depends on OF_GPIO help This driver supports turning off your board via a GPIO line. 
If your board needs a GPIO high/low to power down, say Y and @@ -47,13 +65,13 @@ config POWER_RESET_HISI config POWER_RESET_MSM bool "Qualcomm MSM power-off driver" - depends on POWER_RESET && ARCH_QCOM + depends on ARCH_QCOM help Power off and restart support for Qualcomm boards. config POWER_RESET_QNAP bool "QNAP power-off driver" - depends on OF_GPIO && POWER_RESET && PLAT_ORION + depends on OF_GPIO && PLAT_ORION help This driver supports turning off QNAP NAS devices by sending commands to the microcontroller which controls the main power. @@ -71,14 +89,22 @@ config POWER_RESET_RESTART config POWER_RESET_SUN6I bool "Allwinner A31 SoC reset driver" depends on ARCH_SUNXI - depends on POWER_RESET help Reboot support for the Allwinner A31 SoCs. +config POWER_RESET_VERSATILE + bool "ARM Versatile family reboot driver" + depends on ARM + depends on MFD_SYSCON + depends on OF + help + Power off and restart support for ARM Versatile family of + reference boards. + config POWER_RESET_VEXPRESS bool "ARM Versatile Express power-off and reset driver" depends on ARM || ARM64 - depends on POWER_RESET && VEXPRESS_CONFIG + depends on VEXPRESS_CONFIG help Power off and reset support for the ARM Ltd. Versatile Express boards. @@ -86,7 +112,6 @@ config POWER_RESET_VEXPRESS config POWER_RESET_XGENE bool "APM SoC X-Gene reset driver" depends on ARM64 - depends on POWER_RESET help Reboot support for the APM SoC X-Gene Eval boards. @@ -97,3 +122,4 @@ config POWER_RESET_KEYSTONE help Reboot support for the KEYSTONE SoCs. +endif diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index a42e70edd037..73221009f2bf 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -1,4 +1,6 @@ obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o +obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o +obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o @@ -7,6 +9,7 @@ obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o +obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c new file mode 100644 index 000000000000..5b08bffcf1a8 --- /dev/null +++ b/drivers/power/reset/arm-versatile-reboot.c @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + */ +#include <linux/init.h> +#include <linux/mfd/syscon.h> +#include <linux/reboot.h> +#include <linux/regmap.h> +#include <linux/of.h> +#include <asm/system_misc.h> + +#define REALVIEW_SYS_LOCK_OFFSET 0x20 +#define REALVIEW_SYS_LOCK_VAL 0xA05F +#define REALVIEW_SYS_RESETCTL_OFFSET 0x40 + +/* + * We detect the different syscon types from the compatible strings. 
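+ * Whatever the board, the sequence is the same: unlock the reset control + * register by writing REALVIEW_SYS_LOCK_VAL (0xA05F) to the LOCK register, + * then write the board-specific value(s) to RESETCTL.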
+ */ +enum versatile_reboot { + REALVIEW_REBOOT_EB, + REALVIEW_REBOOT_PB1176, + REALVIEW_REBOOT_PB11MP, + REALVIEW_REBOOT_PBA8, + REALVIEW_REBOOT_PBX, +}; + +/* Pointer to the system controller */ +static struct regmap *syscon_regmap; +static enum versatile_reboot versatile_reboot_type; + +static const struct of_device_id versatile_reboot_of_match[] = { + { + .compatible = "arm,realview-eb-syscon", + .data = (void *)REALVIEW_REBOOT_EB, + }, + { + .compatible = "arm,realview-pb1176-syscon", + .data = (void *)REALVIEW_REBOOT_PB1176, + }, + { + .compatible = "arm,realview-pb11mp-syscon", + .data = (void *)REALVIEW_REBOOT_PB11MP, + }, + { + .compatible = "arm,realview-pba8-syscon", + .data = (void *)REALVIEW_REBOOT_PBA8, + }, + { + .compatible = "arm,realview-pbx-syscon", + .data = (void *)REALVIEW_REBOOT_PBX, + }, + { /* sentinel */ } +}; + +static void versatile_reboot(enum reboot_mode mode, const char *cmd) +{ + /* Unlock the reset register */ + regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET, + REALVIEW_SYS_LOCK_VAL); + /* Then hit reset on the different machines */ + switch (versatile_reboot_type) { + case REALVIEW_REBOOT_EB: + regmap_write(syscon_regmap, + REALVIEW_SYS_RESETCTL_OFFSET, 0x0008); + break; + case REALVIEW_REBOOT_PB1176: + regmap_write(syscon_regmap, + REALVIEW_SYS_RESETCTL_OFFSET, 0x0100); + break; + case REALVIEW_REBOOT_PB11MP: + case REALVIEW_REBOOT_PBA8: + regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET, + 0x0000); + regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET, + 0x0004); + break; + case REALVIEW_REBOOT_PBX: + regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET, + 0x00f0); + regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET, + 0x00f4); + break; + } + dsb(); +} + +static int __init versatile_reboot_probe(void) +{ + const struct of_device_id *reboot_id; + struct device_node *np; + + np = of_find_matching_node_and_match(NULL, versatile_reboot_of_match, + &reboot_id); + if (!np) + return -ENODEV; + versatile_reboot_type = (enum versatile_reboot)reboot_id->data; + + syscon_regmap = syscon_node_to_regmap(np); + if (IS_ERR(syscon_regmap)) + return PTR_ERR(syscon_regmap); + + arm_pm_restart = versatile_reboot; + pr_info("versatile reboot driver registered\n"); + return 0; +} +device_initcall(versatile_reboot_probe); diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c new file mode 100644 index 000000000000..c61000333bb9 --- /dev/null +++ b/drivers/power/reset/at91-poweroff.c @@ -0,0 +1,156 @@ +/* + * Atmel AT91 SAM9 SoCs reset code + * + * Copyright (C) 2007 Atmel Corporation. + * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> + * Copyright (C) 2014 Free Electrons + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
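+ * + * Power-off is requested by writing the SHDW command bit together with the + * KEY password to the shutdown controller's control register; the wake-up + * sources (wake-up pin mode, RTT alarm, RTC alarm) are programmed into the + * mode register from device tree properties.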
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define AT91_SHDW_CR	0x00		/* Shut Down Control Register */
+#define AT91_SHDW_SHDW		BIT(0)		/* Shut Down command */
+#define AT91_SHDW_KEY		(0xa5 << 24)	/* KEY Password */
+
+#define AT91_SHDW_MR	0x04		/* Shut Down Mode Register */
+#define AT91_SHDW_WKMODE0	GENMASK(2, 0)	/* Wake-up 0 Mode Selection */
+#define AT91_SHDW_CPTWK0_MAX	0xf		/* Maximum Counter On Wake Up 0 */
+#define AT91_SHDW_CPTWK0	(AT91_SHDW_CPTWK0_MAX << 4) /* Counter On Wake Up 0 */
+#define AT91_SHDW_CPTWK0_(x)	((x) << 4)
+#define AT91_SHDW_RTTWKEN	BIT(16)		/* Real Time Timer Wake-up Enable */
+#define AT91_SHDW_RTCWKEN	BIT(17)		/* Real Time Clock Wake-up Enable */
+
+#define AT91_SHDW_SR	0x08		/* Shut Down Status Register */
+#define AT91_SHDW_WAKEUP0	BIT(0)		/* Wake-up 0 Status */
+#define AT91_SHDW_RTTWK		BIT(16)		/* Real-time Timer Wake-up */
+#define AT91_SHDW_RTCWK		BIT(17)		/* Real-time Clock Wake-up [SAM9RL] */
+
+enum wakeup_type {
+	AT91_SHDW_WKMODE0_NONE		= 0,
+	AT91_SHDW_WKMODE0_HIGH		= 1,
+	AT91_SHDW_WKMODE0_LOW		= 2,
+	AT91_SHDW_WKMODE0_ANYLEVEL	= 3,
+};
+
+static const char *shdwc_wakeup_modes[] = {
+	[AT91_SHDW_WKMODE0_NONE]	= "none",
+	[AT91_SHDW_WKMODE0_HIGH]	= "high",
+	[AT91_SHDW_WKMODE0_LOW]		= "low",
+	[AT91_SHDW_WKMODE0_ANYLEVEL]	= "any",
+};
+
+static void __iomem *at91_shdwc_base;
+
+static void __init at91_wakeup_status(void)
+{
+	u32 reg = readl(at91_shdwc_base + AT91_SHDW_SR);
+	char *reason = "unknown";
+
+	/* Simple power-on, just bail out */
+	if (!reg)
+		return;
+
+	if (reg & AT91_SHDW_RTTWK)
+		reason = "RTT";
+	else if (reg & AT91_SHDW_RTCWK)
+		reason = "RTC";
+
+	pr_info("AT91: Wake-Up source: %s\n", reason);
+}
+
+static void at91_poweroff(void)
+{
+	writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
+}
+
+static int at91_poweroff_get_wakeup_mode(struct device_node *np)
+{
+	const char *pm;
+	int err, i;
+
+	err = of_property_read_string(np, "atmel,wakeup-mode", &pm);
+	if (err < 0)
+		return AT91_SHDW_WKMODE0_ANYLEVEL;
+
+	for (i = 0; i < ARRAY_SIZE(shdwc_wakeup_modes); i++)
+		if (!strcasecmp(pm, shdwc_wakeup_modes[i]))
+			return i;
+
+	return -ENODEV;
+}
+
+static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int wakeup_mode;
+	u32 mode = 0, tmp;
+
+	wakeup_mode = at91_poweroff_get_wakeup_mode(np);
+	if (wakeup_mode < 0) {
+		dev_warn(&pdev->dev, "shdwc unknown wakeup mode\n");
+		return;
+	}
+
+	if (!of_property_read_u32(np, "atmel,wakeup-counter", &tmp)) {
+		if (tmp > AT91_SHDW_CPTWK0_MAX) {
+			dev_warn(&pdev->dev,
+				 "shdwc wakeup counter 0x%x > 0x%x reduce it to 0x%x\n",
+				 tmp, AT91_SHDW_CPTWK0_MAX, AT91_SHDW_CPTWK0_MAX);
+			tmp = AT91_SHDW_CPTWK0_MAX;
+		}
+		mode |= AT91_SHDW_CPTWK0_(tmp);
+	}
+
+	if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
+		mode |= AT91_SHDW_RTCWKEN;
+
+	if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
+		mode |= AT91_SHDW_RTTWKEN;
+
+	writel(wakeup_mode | mode, at91_shdwc_base + AT91_SHDW_MR);
+}
+
+static int at91_poweroff_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(at91_shdwc_base)) {
+		dev_err(&pdev->dev, "Could not map reset controller address\n");
+		return PTR_ERR(at91_shdwc_base);
+	}
+
+	at91_wakeup_status();
+
+	if (pdev->dev.of_node)
+		at91_poweroff_dt_set_wakeup_mode(pdev);
+
+	pm_power_off = at91_poweroff;
+
+	return 0;
+}
+
+static struct of_device_id at91_poweroff_of_match[] = {
+	{ .compatible = "atmel,at91sam9260-shdwc", },
+	{ .compatible = "atmel,at91sam9rl-shdwc", },
+	{ .compatible = "atmel,at91sam9x5-shdwc", },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver at91_poweroff_driver = {
+	.probe = at91_poweroff_probe,
+	.driver = {
+		.name = "at91-poweroff",
+		.of_match_table = at91_poweroff_of_match,
+	},
+};
+module_platform_driver(at91_poweroff_driver);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
new file mode 100644
index 000000000000..3611806c9cfd
--- /dev/null
+++ b/drivers/power/reset/at91-reset.c
@@ -0,0 +1,252 @@
+/*
+ * Atmel AT91 SAM9 SoCs reset code
+ *
+ * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) BitBox Ltd 2010
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ * Copyright (C) 2014 Free Electrons
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+#include <mach/at91sam9_ddrsdr.h>
+#include <mach/at91sam9_sdramc.h>
+
+#define AT91_RSTC_CR	0x00		/* Reset Controller Control Register */
+#define AT91_RSTC_PROCRST	BIT(0)		/* Processor Reset */
+#define AT91_RSTC_PERRST	BIT(2)		/* Peripheral Reset */
+#define AT91_RSTC_EXTRST	BIT(3)		/* External Reset */
+#define AT91_RSTC_KEY		(0xa5 << 24)	/* KEY Password */
+
+#define AT91_RSTC_SR	0x04		/* Reset Controller Status Register */
+#define AT91_RSTC_URSTS		BIT(0)		/* User Reset Status */
+#define AT91_RSTC_RSTTYP	GENMASK(10, 8)	/* Reset Type */
+#define AT91_RSTC_NRSTL		BIT(16)		/* NRST Pin Level */
+#define AT91_RSTC_SRCMP		BIT(17)		/* Software Reset Command in Progress */
+
+#define AT91_RSTC_MR	0x08		/* Reset Controller Mode Register */
+#define AT91_RSTC_URSTEN	BIT(0)		/* User Reset Enable */
+#define AT91_RSTC_URSTIEN	BIT(4)		/* User Reset Interrupt Enable */
+#define AT91_RSTC_ERSTL		GENMASK(11, 8)	/* External Reset Length */
+
+enum reset_type {
+	RESET_TYPE_GENERAL	= 0,
+	RESET_TYPE_WAKEUP	= 1,
+	RESET_TYPE_WATCHDOG	= 2,
+	RESET_TYPE_SOFTWARE	= 3,
+	RESET_TYPE_USER		= 4,
+};
+
+static void __iomem *at91_ramc_base[2], *at91_rstc_base;
+
+/*
+ * Unless the SDRAM is cleanly shut down before we hit the
+ * reset register it can be left driving the data bus and
+ * killing the chance of a subsequent boot from NAND.
+ */
+static void at91sam9260_restart(enum reboot_mode mode, const char *cmd)
+{
+	asm volatile(
+		/* Align to cache lines */
+		".balign 32\n\t"
+
+		/* Disable SDRAM accesses */
+		"str	%2, [%0, #" __stringify(AT91_SDRAMC_TR) "]\n\t"
+
+		/* Power down SDRAM */
+		"str	%3, [%0, #" __stringify(AT91_SDRAMC_LPR) "]\n\t"
+
+		/* Reset CPU */
+		"str	%4, [%1, #" __stringify(AT91_RSTC_CR) "]\n\t"
+
+		"b	.\n\t"
+		:
+		: "r" (at91_ramc_base[0]),
+		  "r" (at91_rstc_base),
+		  "r" (1),
+		  "r" (AT91_SDRAMC_LPCB_POWER_DOWN),
+		  "r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST));
+}
+
+static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
+{
+	asm volatile(
+		/*
+		 * Test whether we have a second RAM controller to care
+		 * about.
+		 *
+		 * First, test that we can dereference the virtual address.
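+		 * (at91_ramc_base[1] is left NULL when the device tree
+		 * describes a single RAM controller, in which case the
+		 * conditional "strne" stores for SDRAM1 are skipped.)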
+		 */
+		"cmp	%1, #0\n\t"
+		"beq	1f\n\t"
+
+		/* Then, test that the RAM controller is enabled */
+		"ldr	r0, [%1]\n\t"
+		"cmp	r0, #0\n\t"
+
+		/* Align to cache lines */
+		".balign 32\n\t"
+
+		/* Disable SDRAM0 accesses */
+		"1:	str	%3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+		/* Power down SDRAM0 */
+		"	str	%4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+		/* Disable SDRAM1 accesses */
+		"	strne	%3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+		/* Power down SDRAM1 */
+		"	strne	%4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+		/* Reset CPU */
+		"	str	%5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+
+		"	b	.\n\t"
+		:
+		: "r" (at91_ramc_base[0]),
+		  "r" (at91_ramc_base[1]),
+		  "r" (at91_rstc_base),
+		  "r" (1),
+		  "r" (AT91_DDRSDRC_LPCB_POWER_DOWN),
+		  "r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)
+		: "r0");
}
+
+static void __init at91_reset_status(struct platform_device *pdev)
+{
+	u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
+	char *reason;
+
+	switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
+	case RESET_TYPE_GENERAL:
+		reason = "general reset";
+		break;
+	case RESET_TYPE_WAKEUP:
+		reason = "wakeup";
+		break;
+	case RESET_TYPE_WATCHDOG:
+		reason = "watchdog reset";
+		break;
+	case RESET_TYPE_SOFTWARE:
+		reason = "software reset";
+		break;
+	case RESET_TYPE_USER:
+		reason = "user reset";
+		break;
+	default:
+		reason = "unknown reset";
+		break;
+	}
+
+	pr_info("AT91: Starting after %s\n", reason);
+}
+
+static struct of_device_id at91_ramc_of_match[] = {
+	{ .compatible = "atmel,at91sam9260-sdramc", },
+	{ .compatible = "atmel,at91sam9g45-ddramc", },
+	{ .compatible = "atmel,sama5d3-ddramc", },
+	{ /* sentinel */ }
+};
+
+static struct of_device_id at91_reset_of_match[] = {
+	{ .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
+	{ .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
+	{ /* sentinel */ }
+};
+
+static int at91_reset_of_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct device_node *np;
+	int idx = 0;
+
+	at91_rstc_base = of_iomap(pdev->dev.of_node, 0);
+	if (!at91_rstc_base) {
+		dev_err(&pdev->dev, "Could not map reset controller address\n");
+		return -ENODEV;
+	}
+
+	for_each_matching_node(np, at91_ramc_of_match) {
+		at91_ramc_base[idx] = of_iomap(np, 0);
+		if (!at91_ramc_base[idx]) {
+			dev_err(&pdev->dev, "Could not map ram controller address\n");
+			return -ENODEV;
+		}
+		idx++;
+	}
+
+	match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+	arm_pm_restart = match->data;
+
+	return 0;
+}
+
+static int at91_reset_platform_probe(struct platform_device *pdev)
+{
+	const struct platform_device_id *match;
+	struct resource *res;
+	int idx = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	at91_rstc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(at91_rstc_base)) {
+		dev_err(&pdev->dev, "Could not map reset controller address\n");
+		return PTR_ERR(at91_rstc_base);
+	}
+
+	for (idx = 0; idx < 2; idx++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
+		at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
+						   resource_size(res));
+		if (!at91_ramc_base[idx]) {
+			dev_err(&pdev->dev, "Could not map ram controller address\n");
+			return -ENOMEM;
+		}
+	}
+
+	match = platform_get_device_id(pdev);
+	arm_pm_restart = (void (*)(enum reboot_mode, const char *))
+		match->driver_data;
+
+	return 0;
+}
+
+static int at91_reset_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (pdev->dev.of_node)
+		ret =
at91_reset_of_probe(pdev); + else + ret = at91_reset_platform_probe(pdev); + + if (ret) + return ret; + + at91_reset_status(pdev); + + return 0; +} + +static struct platform_device_id at91_reset_plat_match[] = { + { "at91-sam9260-reset", (unsigned long)at91sam9260_restart }, + { "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart }, + { /* sentinel */ } +}; + +static struct platform_driver at91_reset_driver = { + .probe = at91_reset_probe, + .driver = { + .name = "at91-reset", + .of_match_table = at91_reset_of_match, + }, + .id_table = at91_reset_plat_match, +}; +module_platform_driver(at91_reset_driver); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index fae9464eed9c..1bea0fc43464 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1175,9 +1175,16 @@ config RTC_DRV_SUN4V If you say Y here you will get support for the Hypervisor based RTC on SUN4V systems. +config RTC_DRV_SUN6I + tristate "Allwinner A31 RTC" + depends on MACH_SUN6I || MACH_SUN8I + help + If you say Y here you will get support for the RTC found on + Allwinner A31. + config RTC_DRV_SUNXI tristate "Allwinner sun4i/sun7i RTC" - depends on ARCH_SUNXI + depends on MACH_SUN4I || MACH_SUN7I help If you say Y here you will get support for the RTC found on Allwinner A10/A20. diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 56f061c7c815..9055b7dd3dc5 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -128,6 +128,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o +obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c new file mode 100644 index 000000000000..c169a2cd4727 --- /dev/null +++ b/drivers/rtc/rtc-sun6i.c @@ -0,0 +1,447 @@ +/* + * An RTC driver for Allwinner A31/A23 + * + * Copyright (c) 2014, Chen-Yu Tsai <wens@csie.org> + * + * based on rtc-sunxi.c + * + * An RTC driver for Allwinner A10/A20 + * + * Copyright (c) 2013, Carlo Caione <carlo.caione@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/types.h> + +/* Control register */ +#define SUN6I_LOSC_CTRL 0x0000 +#define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9) +#define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8) +#define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7) +#define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7) + +/* RTC */ +#define SUN6I_RTC_YMD 0x0010 +#define SUN6I_RTC_HMS 0x0014 + +/* Alarm 0 (counter) */ +#define SUN6I_ALRM_COUNTER 0x0020 +#define SUN6I_ALRM_CUR_VAL 0x0024 +#define SUN6I_ALRM_EN 0x0028 +#define SUN6I_ALRM_EN_CNT_EN BIT(0) +#define SUN6I_ALRM_IRQ_EN 0x002c +#define SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0) +#define SUN6I_ALRM_IRQ_STA 0x0030 +#define SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0) + +/* Alarm 1 (wall clock) */ +#define SUN6I_ALRM1_EN 0x0044 +#define SUN6I_ALRM1_IRQ_EN 0x0048 +#define SUN6I_ALRM1_IRQ_STA 0x004c +#define SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND BIT(0) + +/* Alarm config */ +#define SUN6I_ALARM_CONFIG 0x0050 +#define SUN6I_ALARM_CONFIG_WAKEUP BIT(0) + +/* + * Get date values + */ +#define SUN6I_DATE_GET_DAY_VALUE(x) ((x) & 0x0000001f) +#define SUN6I_DATE_GET_MON_VALUE(x) (((x) & 0x00000f00) >> 8) +#define SUN6I_DATE_GET_YEAR_VALUE(x) (((x) & 0x003f0000) >> 16) +#define SUN6I_LEAP_GET_VALUE(x) (((x) & 0x00400000) >> 22) + +/* + * Get time values + */ +#define SUN6I_TIME_GET_SEC_VALUE(x) ((x) & 0x0000003f) +#define SUN6I_TIME_GET_MIN_VALUE(x) (((x) & 0x00003f00) >> 8) +#define SUN6I_TIME_GET_HOUR_VALUE(x) (((x) & 0x001f0000) >> 16) + +/* + * Set date values + */ +#define SUN6I_DATE_SET_DAY_VALUE(x) ((x) & 0x0000001f) +#define SUN6I_DATE_SET_MON_VALUE(x) ((x) << 8 & 0x00000f00) +#define SUN6I_DATE_SET_YEAR_VALUE(x) ((x) << 16 & 0x003f0000) +#define SUN6I_LEAP_SET_VALUE(x) ((x) << 22 & 0x00400000) + +/* + * Set time values + */ +#define SUN6I_TIME_SET_SEC_VALUE(x) ((x) & 0x0000003f) +#define SUN6I_TIME_SET_MIN_VALUE(x) ((x) << 8 & 0x00003f00) +#define SUN6I_TIME_SET_HOUR_VALUE(x) ((x) << 16 & 0x001f0000) + +/* + * The year parameter passed to the driver is usually an offset relative to + * the year 1900. This macro is used to convert this offset to another one + * relative to the minimum year allowed by the hardware. + * + * The year range is 1970 - 2033. This range is selected to match Allwinner's + * driver, even though it is somewhat limited. 
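+ *
+ * For example, a tm_year value of 114 (i.e. the year 2014) is stored
+ * in the hardware year field as 114 - SUN6I_YEAR_OFF = 44.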
+ */ +#define SUN6I_YEAR_MIN 1970 +#define SUN6I_YEAR_MAX 2033 +#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900) + +struct sun6i_rtc_dev { + struct rtc_device *rtc; + struct device *dev; + void __iomem *base; + int irq; + unsigned long alarm; +}; + +static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) +{ + struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id; + u32 val; + + val = readl(chip->base + SUN6I_ALRM_IRQ_STA); + + if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) { + val |= SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND; + writel(val, chip->base + SUN6I_ALRM_IRQ_STA); + + rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) +{ + u32 alrm_val = 0; + u32 alrm_irq_val = 0; + u32 alrm_wake_val = 0; + + if (to) { + alrm_val = SUN6I_ALRM_EN_CNT_EN; + alrm_irq_val = SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN; + alrm_wake_val = SUN6I_ALARM_CONFIG_WAKEUP; + } else { + writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND, + chip->base + SUN6I_ALRM_IRQ_STA); + } + + writel(alrm_val, chip->base + SUN6I_ALRM_EN); + writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN); + writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG); +} + +static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) +{ + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + u32 date, time; + + /* + * read again in case it changes + */ + do { + date = readl(chip->base + SUN6I_RTC_YMD); + time = readl(chip->base + SUN6I_RTC_HMS); + } while ((date != readl(chip->base + SUN6I_RTC_YMD)) || + (time != readl(chip->base + SUN6I_RTC_HMS))); + + rtc_tm->tm_sec = SUN6I_TIME_GET_SEC_VALUE(time); + rtc_tm->tm_min = SUN6I_TIME_GET_MIN_VALUE(time); + rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time); + + rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date); + rtc_tm->tm_mon = SUN6I_DATE_GET_MON_VALUE(date); + rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date); + + rtc_tm->tm_mon -= 1; + + /* + * switch from (data_year->min)-relative offset to + * a (1900)-relative one + */ + rtc_tm->tm_year += SUN6I_YEAR_OFF; + + return rtc_valid_tm(rtc_tm); +} + +static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + u32 alrm_st; + u32 alrm_en; + + alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN); + alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA); + wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN); + wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN); + rtc_time_to_tm(chip->alarm, &wkalrm->time); + + return 0; +} + +static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + struct rtc_time *alrm_tm = &wkalrm->time; + struct rtc_time tm_now; + unsigned long time_now = 0; + unsigned long time_set = 0; + unsigned long time_gap = 0; + int ret = 0; + + ret = sun6i_rtc_gettime(dev, &tm_now); + if (ret < 0) { + dev_err(dev, "Error in getting time\n"); + return -EINVAL; + } + + rtc_tm_to_time(alrm_tm, &time_set); + rtc_tm_to_time(&tm_now, &time_now); + if (time_set <= time_now) { + dev_err(dev, "Date to set in the past\n"); + return -EINVAL; + } + + time_gap = time_set - time_now; + + if (time_gap > U32_MAX) { + dev_err(dev, "Date too far in the future\n"); + return -EINVAL; + } + + sun6i_rtc_setaie(0, chip); + writel(0, chip->base + SUN6I_ALRM_COUNTER); + usleep_range(100, 300); + + writel(time_gap, chip->base + SUN6I_ALRM_COUNTER); + chip->alarm = time_set; + + sun6i_rtc_setaie(wkalrm->enabled, chip); + + return 0; +} 
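+
+/*
+ * Note that alarm 0 is programmed relative to the current time: what
+ * is written to SUN6I_ALRM_COUNTER is (time_set - time_now) in
+ * seconds, which is why sun6i_rtc_setalarm() caches the absolute
+ * alarm time in chip->alarm for sun6i_rtc_getalarm() to report back.
+ */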
+ +static int sun6i_rtc_wait(struct sun6i_rtc_dev *chip, int offset, + unsigned int mask, unsigned int ms_timeout) +{ + const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout); + u32 reg; + + do { + reg = readl(chip->base + offset); + reg &= mask; + + if (!reg) + return 0; + + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm) +{ + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + u32 date = 0; + u32 time = 0; + int year; + + year = rtc_tm->tm_year + 1900; + if (year < SUN6I_YEAR_MIN || year > SUN6I_YEAR_MAX) { + dev_err(dev, "rtc only supports year in range %d - %d\n", + SUN6I_YEAR_MIN, SUN6I_YEAR_MAX); + return -EINVAL; + } + + rtc_tm->tm_year -= SUN6I_YEAR_OFF; + rtc_tm->tm_mon += 1; + + date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) | + SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) | + SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year); + + if (is_leap_year(year)) + date |= SUN6I_LEAP_SET_VALUE(1); + + time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) | + SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min) | + SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour); + + /* Check whether registers are writable */ + if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL, + SUN6I_LOSC_CTRL_ACC_MASK, 50)) { + dev_err(dev, "rtc is still busy.\n"); + return -EBUSY; + } + + writel(time, chip->base + SUN6I_RTC_HMS); + + /* + * After writing the RTC HH-MM-SS register, the + * SUN6I_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not + * be cleared until the real writing operation is finished + */ + + if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL, + SUN6I_LOSC_CTRL_RTC_HMS_ACC, 50)) { + dev_err(dev, "Failed to set rtc time.\n"); + return -ETIMEDOUT; + } + + writel(date, chip->base + SUN6I_RTC_YMD); + + /* + * After writing the RTC YY-MM-DD register, the + * SUN6I_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not + * be cleared until the real writing operation is finished + */ + + if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL, + SUN6I_LOSC_CTRL_RTC_YMD_ACC, 50)) { + dev_err(dev, "Failed to set rtc time.\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int sun6i_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + + if (!enabled) + sun6i_rtc_setaie(enabled, chip); + + return 0; +} + +static const struct rtc_class_ops sun6i_rtc_ops = { + .read_time = sun6i_rtc_gettime, + .set_time = sun6i_rtc_settime, + .read_alarm = sun6i_rtc_getalarm, + .set_alarm = sun6i_rtc_setalarm, + .alarm_irq_enable = sun6i_rtc_alarm_irq_enable +}; + +static int sun6i_rtc_probe(struct platform_device *pdev) +{ + struct sun6i_rtc_dev *chip; + struct resource *res; + int ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + platform_set_drvdata(pdev, chip); + chip->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(chip->base)) + return PTR_ERR(chip->base); + + chip->irq = platform_get_irq(pdev, 0); + if (chip->irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return chip->irq; + } + + ret = devm_request_irq(&pdev->dev, chip->irq, sun6i_rtc_alarmirq, + 0, dev_name(&pdev->dev), chip); + if (ret) { + dev_err(&pdev->dev, "Could not request IRQ\n"); + return ret; + } + + /* clear the alarm counter value */ + writel(0, chip->base + SUN6I_ALRM_COUNTER); + + /* disable counter alarm */ + writel(0, chip->base + SUN6I_ALRM_EN); + + /* disable counter alarm interrupt */ 
+	writel(0, chip->base + SUN6I_ALRM_IRQ_EN);
+
+	/* disable week alarm */
+	writel(0, chip->base + SUN6I_ALRM1_EN);
+
+	/* disable week alarm interrupt */
+	writel(0, chip->base + SUN6I_ALRM1_IRQ_EN);
+
+	/* clear counter alarm pending interrupts */
+	writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND,
+	       chip->base + SUN6I_ALRM_IRQ_STA);
+
+	/* clear week alarm pending interrupts */
+	writel(SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND,
+	       chip->base + SUN6I_ALRM1_IRQ_STA);
+
+	/* disable alarm wakeup */
+	writel(0, chip->base + SUN6I_ALARM_CONFIG);
+
+	chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev,
+					&sun6i_rtc_ops, THIS_MODULE);
+	if (IS_ERR(chip->rtc)) {
+		dev_err(&pdev->dev, "unable to register device\n");
+		return PTR_ERR(chip->rtc);
+	}
+
+	dev_info(&pdev->dev, "RTC enabled\n");
+
+	return 0;
+}
+
+static int sun6i_rtc_remove(struct platform_device *pdev)
+{
+	struct sun6i_rtc_dev *chip = platform_get_drvdata(pdev);
+
+	rtc_device_unregister(chip->rtc);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_rtc_dt_ids[] = {
+	{ .compatible = "allwinner,sun6i-a31-rtc" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
+
+static struct platform_driver sun6i_rtc_driver = {
+	.probe		= sun6i_rtc_probe,
+	.remove		= sun6i_rtc_remove,
+	.driver		= {
+		.name		= "sun6i-rtc",
+		.of_match_table = sun6i_rtc_dt_ids,
+	},
+};
+
+module_platform_driver(sun6i_rtc_driver);
+
+MODULE_DESCRIPTION("sun6i RTC driver");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c8543855aa82..76d6bd4da138 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,5 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/ti/Kconfig"
+source "drivers/soc/versatile/Kconfig"
 
 endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 3b1b95d932d1..063113d0bd38 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,3 +4,5 @@
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
+obj-$(CONFIG_SOC_TI)		+= ti/
+obj-$(CONFIG_PLAT_VERSATILE)	+= versatile/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 000000000000..7266b2165183
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,31 @@
+#
+# TI SOC drivers
+#
+menuconfig SOC_TI
+	bool "TI SOC drivers support"
+
+if SOC_TI
+
+config KEYSTONE_NAVIGATOR_QMSS
+	tristate "Keystone Queue Manager Sub System"
+	depends on ARCH_KEYSTONE
+	help
+	  Say y here to support the Keystone multicore Navigator Queue
+	  Manager. The Queue Manager is a hardware module that is
+	  responsible for accelerating management of the packet queues.
+	  Packets are queued/de-queued by writing/reading a descriptor
+	  address to/from a particular memory-mapped location in the
+	  Queue Manager module.
+
+	  If unsure, say N.
+
+config KEYSTONE_NAVIGATOR_DMA
+	tristate "TI Keystone Navigator Packet DMA support"
+	depends on ARCH_KEYSTONE
+	help
+	  Say y to enable support for the Keystone Navigator Packet DMA
+	  on the Keystone family of devices. It sets up the DMA channels
+	  for the Queue Manager Sub System.
+
+	  If unsure, say N.
+ +endif # SOC_TI diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile new file mode 100644 index 000000000000..6bed611e1934 --- /dev/null +++ b/drivers/soc/ti/Makefile @@ -0,0 +1,5 @@ +# +# TI Keystone SOC drivers +# +obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o +obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c new file mode 100644 index 000000000000..17264275f32b --- /dev/null +++ b/drivers/soc/ti/knav_dma.c @@ -0,0 +1,815 @@ +/* + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com> + * Sandeep Nair <sandeep_n@ti.com> + * Cyril Chemparathy <cyril@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/io.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/dma-direction.h> +#include <linux/interrupt.h> +#include <linux/pm_runtime.h> +#include <linux/of_dma.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/knav_dma.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#define REG_MASK 0xffffffff + +#define DMA_LOOPBACK BIT(31) +#define DMA_ENABLE BIT(31) +#define DMA_TEARDOWN BIT(30) + +#define DMA_TX_FILT_PSWORDS BIT(29) +#define DMA_TX_FILT_EINFO BIT(30) +#define DMA_TX_PRIO_SHIFT 0 +#define DMA_RX_PRIO_SHIFT 16 +#define DMA_PRIO_MASK GENMASK(3, 0) +#define DMA_PRIO_DEFAULT 0 +#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */ +#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0) +#define DMA_RX_TIMEOUT_SHIFT 0 + +#define CHAN_HAS_EPIB BIT(30) +#define CHAN_HAS_PSINFO BIT(29) +#define CHAN_ERR_RETRY BIT(28) +#define CHAN_PSINFO_AT_SOP BIT(25) +#define CHAN_SOP_OFF_SHIFT 16 +#define CHAN_SOP_OFF_MASK GENMASK(9, 0) +#define DESC_TYPE_SHIFT 26 +#define DESC_TYPE_MASK GENMASK(2, 0) + +/* + * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical + * navigator cloud mapping scheme. + * using the 14bit physical queue numbers directly maps into this scheme. 
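+ * (For example, physical queue 0x2001 would be queue 1 on logical
+ * queue manager 2, as each manager then spans 4096 queue numbers.)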
+ */ +#define CHAN_QNUM_MASK GENMASK(14, 0) +#define DMA_MAX_QMS 4 +#define DMA_TIMEOUT 1 /* msecs */ +#define DMA_INVALID_ID 0xffff + +struct reg_global { + u32 revision; + u32 perf_control; + u32 emulation_control; + u32 priority_control; + u32 qm_base_address[DMA_MAX_QMS]; +}; + +struct reg_chan { + u32 control; + u32 mode; + u32 __rsvd[6]; +}; + +struct reg_tx_sched { + u32 prio; +}; + +struct reg_rx_flow { + u32 control; + u32 tags; + u32 tag_sel; + u32 fdq_sel[2]; + u32 thresh[3]; +}; + +struct knav_dma_pool_device { + struct device *dev; + struct list_head list; +}; + +struct knav_dma_device { + bool loopback, enable_all; + unsigned tx_priority, rx_priority, rx_timeout; + unsigned logical_queue_managers; + unsigned qm_base_address[DMA_MAX_QMS]; + struct reg_global __iomem *reg_global; + struct reg_chan __iomem *reg_tx_chan; + struct reg_rx_flow __iomem *reg_rx_flow; + struct reg_chan __iomem *reg_rx_chan; + struct reg_tx_sched __iomem *reg_tx_sched; + unsigned max_rx_chan, max_tx_chan; + unsigned max_rx_flow; + char name[32]; + atomic_t ref_count; + struct list_head list; + struct list_head chan_list; + spinlock_t lock; +}; + +struct knav_dma_chan { + enum dma_transfer_direction direction; + struct knav_dma_device *dma; + atomic_t ref_count; + + /* registers */ + struct reg_chan __iomem *reg_chan; + struct reg_tx_sched __iomem *reg_tx_sched; + struct reg_rx_flow __iomem *reg_rx_flow; + + /* configuration stuff */ + unsigned channel, flow; + struct knav_dma_cfg cfg; + struct list_head list; + spinlock_t lock; +}; + +#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \ + ch->channel : ch->flow) + +static struct knav_dma_pool_device *kdev; + +static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg) +{ + if (!memcmp(&chan->cfg, cfg, sizeof(*cfg))) + return true; + else + return false; +} + +static int chan_start(struct knav_dma_chan *chan, + struct knav_dma_cfg *cfg) +{ + u32 v = 0; + + spin_lock(&chan->lock); + if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) { + if (cfg->u.tx.filt_pswords) + v |= DMA_TX_FILT_PSWORDS; + if (cfg->u.tx.filt_einfo) + v |= DMA_TX_FILT_EINFO; + writel_relaxed(v, &chan->reg_chan->mode); + writel_relaxed(DMA_ENABLE, &chan->reg_chan->control); + } + + if (chan->reg_tx_sched) + writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio); + + if (chan->reg_rx_flow) { + v = 0; + + if (cfg->u.rx.einfo_present) + v |= CHAN_HAS_EPIB; + if (cfg->u.rx.psinfo_present) + v |= CHAN_HAS_PSINFO; + if (cfg->u.rx.err_mode == DMA_RETRY) + v |= CHAN_ERR_RETRY; + v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT; + if (cfg->u.rx.psinfo_at_sop) + v |= CHAN_PSINFO_AT_SOP; + v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK) + << CHAN_SOP_OFF_SHIFT; + v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK; + + writel_relaxed(v, &chan->reg_rx_flow->control); + writel_relaxed(0, &chan->reg_rx_flow->tags); + writel_relaxed(0, &chan->reg_rx_flow->tag_sel); + + v = cfg->u.rx.fdq[0] << 16; + v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK; + writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]); + + v = cfg->u.rx.fdq[2] << 16; + v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK; + writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]); + + writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); + writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); + writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); + } + + /* Keep a copy of the cfg */ + memcpy(&chan->cfg, cfg, sizeof(*cfg)); + spin_unlock(&chan->lock); + + return 0; +} + +static int chan_teardown(struct knav_dma_chan *chan) +{ + 
unsigned long end, value; + + if (!chan->reg_chan) + return 0; + + /* indicate teardown */ + writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control); + + /* wait for the dma to shut itself down */ + end = jiffies + msecs_to_jiffies(DMA_TIMEOUT); + do { + value = readl_relaxed(&chan->reg_chan->control); + if ((value & DMA_ENABLE) == 0) + break; + } while (time_after(end, jiffies)); + + if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) { + dev_err(kdev->dev, "timeout waiting for teardown\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void chan_stop(struct knav_dma_chan *chan) +{ + spin_lock(&chan->lock); + if (chan->reg_rx_flow) { + /* first detach fdqs, starve out the flow */ + writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]); + writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]); + writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); + writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); + writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); + } + + /* teardown the dma channel */ + chan_teardown(chan); + + /* then disconnect the completion side */ + if (chan->reg_rx_flow) { + writel_relaxed(0, &chan->reg_rx_flow->control); + writel_relaxed(0, &chan->reg_rx_flow->tags); + writel_relaxed(0, &chan->reg_rx_flow->tag_sel); + } + + memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg)); + spin_unlock(&chan->lock); + + dev_dbg(kdev->dev, "channel stopped\n"); +} + +static void dma_hw_enable_all(struct knav_dma_device *dma) +{ + int i; + + for (i = 0; i < dma->max_tx_chan; i++) { + writel_relaxed(0, &dma->reg_tx_chan[i].mode); + writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control); + } +} + + +static void knav_dma_hw_init(struct knav_dma_device *dma) +{ + unsigned v; + int i; + + spin_lock(&dma->lock); + v = dma->loopback ? DMA_LOOPBACK : 0; + writel_relaxed(v, &dma->reg_global->emulation_control); + + v = readl_relaxed(&dma->reg_global->perf_control); + v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT); + writel_relaxed(v, &dma->reg_global->perf_control); + + v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) | + (dma->rx_priority << DMA_RX_PRIO_SHIFT)); + + writel_relaxed(v, &dma->reg_global->priority_control); + + /* Always enable all Rx channels. Rx paths are managed using flows */ + for (i = 0; i < dma->max_rx_chan; i++) + writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control); + + for (i = 0; i < dma->logical_queue_managers; i++) + writel_relaxed(dma->qm_base_address[i], + &dma->reg_global->qm_base_address[i]); + spin_unlock(&dma->lock); +} + +static void knav_dma_hw_destroy(struct knav_dma_device *dma) +{ + int i; + unsigned v; + + spin_lock(&dma->lock); + v = ~DMA_ENABLE & REG_MASK; + + for (i = 0; i < dma->max_rx_chan; i++) + writel_relaxed(v, &dma->reg_rx_chan[i].control); + + for (i = 0; i < dma->max_tx_chan; i++) + writel_relaxed(v, &dma->reg_tx_chan[i].control); + spin_unlock(&dma->lock); +} + +static void dma_debug_show_channels(struct seq_file *s, + struct knav_dma_chan *chan) +{ + int i; + + seq_printf(s, "\t%s %d:\t", + ((chan->direction == DMA_MEM_TO_DEV) ? 
"tx chan" : "rx flow"), + chan_number(chan)); + + if (chan->direction == DMA_MEM_TO_DEV) { + seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n", + chan->cfg.u.tx.filt_einfo, + chan->cfg.u.tx.filt_pswords, + chan->cfg.u.tx.priority); + } else { + seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n", + chan->cfg.u.rx.einfo_present, + chan->cfg.u.rx.psinfo_present, + chan->cfg.u.rx.desc_type); + seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ", + chan->cfg.u.rx.dst_q, + chan->cfg.u.rx.thresh); + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++) + seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]); + seq_printf(s, "\n"); + } +} + +static void dma_debug_show_devices(struct seq_file *s, + struct knav_dma_device *dma) +{ + struct knav_dma_chan *chan; + + list_for_each_entry(chan, &dma->chan_list, list) { + if (atomic_read(&chan->ref_count)) + dma_debug_show_channels(s, chan); + } +} + +static int dma_debug_show(struct seq_file *s, void *v) +{ + struct knav_dma_device *dma; + + list_for_each_entry(dma, &kdev->list, list) { + if (atomic_read(&dma->ref_count)) { + seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n", + dma->name, dma->max_tx_chan, dma->max_rx_flow); + dma_debug_show_devices(s, dma); + } + } + + return 0; +} + +static int knav_dma_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, dma_debug_show, NULL); +} + +static const struct file_operations knav_dma_debug_ops = { + .open = knav_dma_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int of_channel_match_helper(struct device_node *np, const char *name, + const char **dma_instance) +{ + struct of_phandle_args args; + struct device_node *dma_node; + int index; + + dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0); + if (!dma_node) + return -ENODEV; + + *dma_instance = dma_node->name; + index = of_property_match_string(np, "ti,navigator-dma-names", name); + if (index < 0) { + dev_err(kdev->dev, "No 'ti,navigator-dma-names' propery\n"); + return -ENODEV; + } + + if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas", + 1, index, &args)) { + dev_err(kdev->dev, "Missing the pahndle args name %s\n", name); + return -ENODEV; + } + + if (args.args[0] < 0) { + dev_err(kdev->dev, "Missing args for %s\n", name); + return -ENODEV; + } + + return args.args[0]; +} + +/** + * knav_dma_open_channel() - try to setup an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + * @config: dma configuration parameters + * + * Returns pointer to appropriate DMA channel on success or NULL. + */ +void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config) +{ + struct knav_dma_chan *chan; + struct knav_dma_device *dma; + bool found = false; + int chan_num = -1; + const char *instance; + + if (!kdev) { + pr_err("keystone-navigator-dma driver not registered\n"); + return (void *)-EINVAL; + } + + chan_num = of_channel_match_helper(dev->of_node, name, &instance); + if (chan_num < 0) { + dev_err(kdev->dev, "No DMA instace with name %s\n", name); + return (void *)-EINVAL; + } + + dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n", + config->direction == DMA_MEM_TO_DEV ? "transmit" : + config->direction == DMA_DEV_TO_MEM ? 
"receive" : + "unknown", chan_num, instance); + + if (config->direction != DMA_MEM_TO_DEV && + config->direction != DMA_DEV_TO_MEM) { + dev_err(kdev->dev, "bad direction\n"); + return (void *)-EINVAL; + } + + /* Look for correct dma instance */ + list_for_each_entry(dma, &kdev->list, list) { + if (!strcmp(dma->name, instance)) { + found = true; + break; + } + } + if (!found) { + dev_err(kdev->dev, "No DMA instace with name %s\n", instance); + return (void *)-EINVAL; + } + + /* Look for correct dma channel from dma instance */ + found = false; + list_for_each_entry(chan, &dma->chan_list, list) { + if (config->direction == DMA_MEM_TO_DEV) { + if (chan->channel == chan_num) { + found = true; + break; + } + } else { + if (chan->flow == chan_num) { + found = true; + break; + } + } + } + if (!found) { + dev_err(kdev->dev, "channel %d is not in DMA %s\n", + chan_num, instance); + return (void *)-EINVAL; + } + + if (atomic_read(&chan->ref_count) >= 1) { + if (!check_config(chan, config)) { + dev_err(kdev->dev, "channel %d config miss-match\n", + chan_num); + return (void *)-EINVAL; + } + } + + if (atomic_inc_return(&chan->dma->ref_count) <= 1) + knav_dma_hw_init(chan->dma); + + if (atomic_inc_return(&chan->ref_count) <= 1) + chan_start(chan, config); + + dev_dbg(kdev->dev, "channel %d opened from DMA %s\n", + chan_num, instance); + + return chan; +} +EXPORT_SYMBOL_GPL(knav_dma_open_channel); + +/** + * knav_dma_close_channel() - Destroy a dma channel + * + * channel: dma channel handle + * + */ +void knav_dma_close_channel(void *channel) +{ + struct knav_dma_chan *chan = channel; + + if (!kdev) { + pr_err("keystone-navigator-dma driver not registered\n"); + return; + } + + if (atomic_dec_return(&chan->ref_count) <= 0) + chan_stop(chan); + + if (atomic_dec_return(&chan->dma->ref_count) <= 0) + knav_dma_hw_destroy(chan->dma); + + dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n", + chan->channel, chan->flow, chan->dma->name); +} +EXPORT_SYMBOL_GPL(knav_dma_close_channel); + +static void __iomem *pktdma_get_regs(struct knav_dma_device *dma, + struct device_node *node, + unsigned index, resource_size_t *_size) +{ + struct device *dev = kdev->dev; + struct resource res; + void __iomem *regs; + int ret; + + ret = of_address_to_resource(node, index, &res); + if (ret) { + dev_err(dev, "Can't translate of node(%s) address for index(%d)\n", + node->name, index); + return ERR_PTR(ret); + } + + regs = devm_ioremap_resource(kdev->dev, &res); + if (IS_ERR(regs)) + dev_err(dev, "Failed to map register base for index(%d) node(%s)\n", + index, node->name); + if (_size) + *_size = resource_size(&res); + + return regs; +} + +static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow) +{ + struct knav_dma_device *dma = chan->dma; + + chan->flow = flow; + chan->reg_rx_flow = dma->reg_rx_flow + flow; + chan->channel = DMA_INVALID_ID; + dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow); + + return 0; +} + +static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel) +{ + struct knav_dma_device *dma = chan->dma; + + chan->channel = channel; + chan->reg_chan = dma->reg_tx_chan + channel; + chan->reg_tx_sched = dma->reg_tx_sched + channel; + chan->flow = DMA_INVALID_ID; + dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan); + + return 0; +} + +static int pktdma_init_chan(struct knav_dma_device *dma, + enum dma_transfer_direction dir, + unsigned chan_num) +{ + struct device *dev = kdev->dev; + struct knav_dma_chan *chan; + int ret = -EINVAL; + + 
chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + + INIT_LIST_HEAD(&chan->list); + chan->dma = dma; + chan->direction = DMA_NONE; + atomic_set(&chan->ref_count, 0); + spin_lock_init(&chan->lock); + + if (dir == DMA_MEM_TO_DEV) { + chan->direction = dir; + ret = pktdma_init_tx_chan(chan, chan_num); + } else if (dir == DMA_DEV_TO_MEM) { + chan->direction = dir; + ret = pktdma_init_rx_chan(chan, chan_num); + } else { + dev_err(dev, "channel(%d) direction unknown\n", chan_num); + } + + list_add_tail(&chan->list, &dma->chan_list); + + return ret; +} + +static int dma_init(struct device_node *cloud, struct device_node *dma_node) +{ + unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched; + struct device_node *node = dma_node; + struct knav_dma_device *dma; + int ret, len, num_chan = 0; + resource_size_t size; + u32 timeout; + u32 i; + + dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL); + if (!dma) { + dev_err(kdev->dev, "could not allocate driver mem\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&dma->list); + INIT_LIST_HEAD(&dma->chan_list); + + if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) { + dev_err(kdev->dev, "unspecified navigator cloud addresses\n"); + return -ENODEV; + } + + dma->logical_queue_managers = len / sizeof(u32); + if (dma->logical_queue_managers > DMA_MAX_QMS) { + dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n", + dma->logical_queue_managers); + dma->logical_queue_managers = DMA_MAX_QMS; + } + + ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address", + dma->qm_base_address, + dma->logical_queue_managers); + if (ret) { + dev_err(kdev->dev, "invalid navigator cloud addresses\n"); + return -ENODEV; + } + + dma->reg_global = pktdma_get_regs(dma, node, 0, &size); + if (!dma->reg_global) + return -ENODEV; + if (size < sizeof(struct reg_global)) { + dev_err(kdev->dev, "bad size %pa for global regs\n", &size); + return -ENODEV; + } + + dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size); + if (!dma->reg_tx_chan) + return -ENODEV; + + max_tx_chan = size / sizeof(struct reg_chan); + dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size); + if (!dma->reg_rx_chan) + return -ENODEV; + + max_rx_chan = size / sizeof(struct reg_chan); + dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size); + if (!dma->reg_tx_sched) + return -ENODEV; + + max_tx_sched = size / sizeof(struct reg_tx_sched); + dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size); + if (!dma->reg_rx_flow) + return -ENODEV; + + max_rx_flow = size / sizeof(struct reg_rx_flow); + dma->rx_priority = DMA_PRIO_DEFAULT; + dma->tx_priority = DMA_PRIO_DEFAULT; + + dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL); + dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL); + + ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout); + if (ret < 0) { + dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n", + DMA_RX_TIMEOUT_DEFAULT); + timeout = DMA_RX_TIMEOUT_DEFAULT; + } + + dma->rx_timeout = timeout; + dma->max_rx_chan = max_rx_chan; + dma->max_rx_flow = max_rx_flow; + dma->max_tx_chan = min(max_tx_chan, max_tx_sched); + atomic_set(&dma->ref_count, 0); + strcpy(dma->name, node->name); + spin_lock_init(&dma->lock); + + for (i = 0; i < dma->max_tx_chan; i++) { + if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0) + num_chan++; + } + + for (i = 0; i < dma->max_rx_flow; i++) { + if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0) + num_chan++; + } + + list_add_tail(&dma->list, 
&kdev->list); + + /* + * For DSP software usecases or userpace transport software, setup all + * the DMA hardware resources. + */ + if (dma->enable_all) { + atomic_inc(&dma->ref_count); + knav_dma_hw_init(dma); + dma_hw_enable_all(dma); + } + + dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n", + dma->name, num_chan, dma->max_rx_flow, + dma->max_tx_chan, dma->max_rx_chan, + dma->loopback ? ", loopback" : ""); + + return 0; +} + +static int knav_dma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct device_node *child; + int ret = 0; + + if (!node) { + dev_err(&pdev->dev, "could not find device info\n"); + return -EINVAL; + } + + kdev = devm_kzalloc(dev, + sizeof(struct knav_dma_pool_device), GFP_KERNEL); + if (!kdev) { + dev_err(dev, "could not allocate driver mem\n"); + return -ENOMEM; + } + + kdev->dev = dev; + INIT_LIST_HEAD(&kdev->list); + + pm_runtime_enable(kdev->dev); + ret = pm_runtime_get_sync(kdev->dev); + if (ret < 0) { + dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); + return ret; + } + + /* Initialise all packet dmas */ + for_each_child_of_node(node, child) { + ret = dma_init(node, child); + if (ret) { + dev_err(&pdev->dev, "init failed with %d\n", ret); + break; + } + } + + if (list_empty(&kdev->list)) { + dev_err(dev, "no valid dma instance\n"); + return -ENODEV; + } + + debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, + &knav_dma_debug_ops); + + return ret; +} + +static int knav_dma_remove(struct platform_device *pdev) +{ + struct knav_dma_device *dma; + + list_for_each_entry(dma, &kdev->list, list) { + if (atomic_dec_return(&dma->ref_count) == 0) + knav_dma_hw_destroy(dma); + } + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static struct of_device_id of_match[] = { + { .compatible = "ti,keystone-navigator-dma", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver knav_dma_driver = { + .probe = knav_dma_probe, + .remove = knav_dma_remove, + .driver = { + .name = "keystone-navigator-dma", + .owner = THIS_MODULE, + .of_match_table = of_match, + }, +}; +module_platform_driver(knav_dma_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver"); +MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); +MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h new file mode 100644 index 000000000000..bc9dcc8cc3ce --- /dev/null +++ b/drivers/soc/ti/knav_qmss.h @@ -0,0 +1,386 @@ +/* + * Keystone Navigator QMSS driver internal header + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Author: Sandeep Nair <sandeep_n@ti.com> + * Cyril Chemparathy <cyril@ti.com> + * Santosh Shilimkar <santosh.shilimkar@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef __KNAV_QMSS_H__ +#define __KNAV_QMSS_H__ + +#define THRESH_GTE BIT(7) +#define THRESH_LT 0 + +#define PDSP_CTRL_PC_MASK 0xffff0000 +#define PDSP_CTRL_SOFT_RESET BIT(0) +#define PDSP_CTRL_ENABLE BIT(1) +#define PDSP_CTRL_RUNNING BIT(15) + +#define ACC_MAX_CHANNEL 48 +#define ACC_DEFAULT_PERIOD 25 /* usecs */ + +#define ACC_CHANNEL_INT_BASE 2 + +#define ACC_LIST_ENTRY_TYPE 1 +#define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE) +#define ACC_LIST_ENTRY_QUEUE_IDX 0 +#define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1) + +#define ACC_CMD_DISABLE_CHANNEL 0x80 +#define ACC_CMD_ENABLE_CHANNEL 0x81 +#define ACC_CFG_MULTI_QUEUE BIT(21) + +#define ACC_INTD_OFFSET_EOI (0x0010) +#define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch)) +#define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32)) + +#define RANGE_MAX_IRQS 64 + +#define ACC_DESCS_MAX SZ_1K +#define ACC_DESCS_MASK (ACC_DESCS_MAX - 1) +#define DESC_SIZE_MASK 0xful +#define DESC_PTR_MASK (~DESC_SIZE_MASK) + +#define KNAV_NAME_SIZE 32 + +enum knav_acc_result { + ACC_RET_IDLE, + ACC_RET_SUCCESS, + ACC_RET_INVALID_COMMAND, + ACC_RET_INVALID_CHANNEL, + ACC_RET_INACTIVE_CHANNEL, + ACC_RET_ACTIVE_CHANNEL, + ACC_RET_INVALID_QUEUE, + ACC_RET_INVALID_RET, +}; + +struct knav_reg_config { + u32 revision; + u32 __pad1; + u32 divert; + u32 link_ram_base0; + u32 link_ram_size0; + u32 link_ram_base1; + u32 __pad2[2]; + u32 starvation[0]; +}; + +struct knav_reg_region { + u32 base; + u32 start_index; + u32 size_count; + u32 __pad; +}; + +struct knav_reg_pdsp_regs { + u32 control; + u32 status; + u32 cycle_count; + u32 stall_count; +}; + +struct knav_reg_acc_command { + u32 command; + u32 queue_mask; + u32 list_phys; + u32 queue_num; + u32 timer_config; +}; + +struct knav_link_ram_block { + dma_addr_t phys; + void *virt; + size_t size; +}; + +struct knav_acc_info { + u32 pdsp_id; + u32 start_channel; + u32 list_entries; + u32 pacing_mode; + u32 timer_count; + int mem_size; + int list_size; + struct knav_pdsp_info *pdsp; +}; + +struct knav_acc_channel { + u32 channel; + u32 list_index; + u32 open_mask; + u32 *list_cpu[2]; + dma_addr_t list_dma[2]; + char name[KNAV_NAME_SIZE]; + atomic_t retrigger_count; +}; + +struct knav_pdsp_info { + const char *name; + struct knav_reg_pdsp_regs __iomem *regs; + union { + void __iomem *command; + struct knav_reg_acc_command __iomem *acc_command; + u32 __iomem *qos_command; + }; + void __iomem *intd; + u32 __iomem *iram; + const char *firmware; + u32 id; + struct list_head list; +}; + +struct knav_qmgr_info { + unsigned start_queue; + unsigned num_queues; + struct knav_reg_config __iomem *reg_config; + struct knav_reg_region __iomem *reg_region; + struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; + void __iomem *reg_status; + struct list_head list; +}; + +#define KNAV_NUM_LINKRAM 2 + +/** + * struct knav_queue_stats: queue statistics + * pushes: number of push operations + * pops: number of pop operations + * push_errors: number of push errors + * pop_errors: number of pop errors + * notifies: notifier counts + */ +struct knav_queue_stats { + atomic_t pushes; + atomic_t pops; + atomic_t push_errors; + atomic_t pop_errors; + atomic_t notifies; +}; + +/** + * struct knav_reg_queue: queue registers + * @entry_count: valid entries in the queue + * @byte_count: total byte count in thhe queue + * @packet_size: packet size for the queue + * @ptr_size_thresh: packet pointer size threshold + */ +struct knav_reg_queue { + u32 entry_count; + u32 byte_count; + u32 packet_size; + u32 ptr_size_thresh; 
+};
+
+/**
+ * struct knav_region: qmss region info
+ * @dma_start, dma_end: start and end dma address
+ * @virt_start, virt_end: start and end virtual address
+ * @desc_size: descriptor size
+ * @used_desc: consumed descriptors
+ * @id: region number
+ * @num_desc: total descriptors
+ * @link_index: index of the first descriptor
+ * @name: region name
+ * @list: instance in the device's region list
+ * @pools: list of descriptor pools in the region
+ */
+struct knav_region {
+	dma_addr_t		dma_start, dma_end;
+	void			*virt_start, *virt_end;
+	unsigned		desc_size;
+	unsigned		used_desc;
+	unsigned		id;
+	unsigned		num_desc;
+	unsigned		link_index;
+	const char		*name;
+	struct list_head	list;
+	struct list_head	pools;
+};
+
+/**
+ * struct knav_pool: qmss pools
+ * @dev: device pointer
+ * @region: qmss region info
+ * @queue: queue registers
+ * @kdev: qmss device pointer
+ * @region_offset: offset from the base
+ * @num_desc: total descriptors
+ * @desc_size: descriptor size
+ * @region_id: region number
+ * @name: pool name
+ * @list: list head
+ * @region_inst: instance in the region's pool list
+ */
+struct knav_pool {
+	struct device		*dev;
+	struct knav_region	*region;
+	struct knav_queue	*queue;
+	struct knav_device	*kdev;
+	int			region_offset;
+	int			num_desc;
+	int			desc_size;
+	int			region_id;
+	const char		*name;
+	struct list_head	list;
+	struct list_head	region_inst;
+};
+
+/**
+ * struct knav_queue_inst: qmss queue instance properties
+ * @descs: descriptor pointer
+ * @desc_head, desc_tail, desc_count: descriptor counters
+ * @acc: accumulator channel pointer
+ * @kdev: qmss device pointer
+ * @range: range info
+ * @qmgr: queue manager info
+ * @id: queue instance id
+ * @irq_num: irq line number
+ * @notify_needed: notifier needed based on queue type
+ * @num_notifiers: total notifiers
+ * @handles: list head
+ * @name: queue instance name
+ * @irq_name: irq line name
+ */
+struct knav_queue_inst {
+	u32			*descs;
+	atomic_t		desc_head, desc_tail, desc_count;
+	struct knav_acc_channel	*acc;
+	struct knav_device	*kdev;
+	struct knav_range_info	*range;
+	struct knav_qmgr_info	*qmgr;
+	u32			id;
+	int			irq_num;
+	int			notify_needed;
+	atomic_t		num_notifiers;
+	struct list_head	handles;
+	const char		*name;
+	const char		*irq_name;
+};
+
+/**
+ * struct knav_queue: qmss queue properties
+ * @reg_push, reg_pop, reg_peek: push, pop queue registers
+ * @inst: qmss queue instance properties
+ * @stats: queue statistics
+ * @notifier_fn: notifier function
+ * @notifier_fn_arg: notifier function argument
+ * @notifier_enabled: notifier enabled for a given queue
+ * @rcu: rcu head
+ * @flags: queue flags
+ * @list: list head
+ */
+struct knav_queue {
+	struct knav_reg_queue __iomem	*reg_push, *reg_pop, *reg_peek;
+	struct knav_queue_inst		*inst;
+	struct knav_queue_stats		stats;
+	knav_queue_notify_fn		notifier_fn;
+	void				*notifier_fn_arg;
+	atomic_t			notifier_enabled;
+	struct rcu_head			rcu;
+	unsigned			flags;
+	struct list_head		list;
+};
+
+struct knav_device {
+	struct device			*dev;
+	unsigned			base_id;
+	unsigned			num_queues;
+	unsigned			num_queues_in_use;
+	unsigned			inst_shift;
+	struct knav_link_ram_block	link_rams[KNAV_NUM_LINKRAM];
+	void				*instances;
+	struct list_head		regions;
+	struct list_head		queue_ranges;
+	struct list_head		pools;
+	struct list_head		pdsps;
+	struct list_head		qmgrs;
+};
+
+struct knav_range_ops {
+	int	(*init_range)(struct knav_range_info *range);
+	int	(*free_range)(struct knav_range_info *range);
+	int	(*init_queue)(struct knav_range_info *range,
+			      struct knav_queue_inst *inst);
+	int	(*open_queue)(struct
knav_range_info *range, + struct knav_queue_inst *inst, unsigned flags); + int (*close_queue)(struct knav_range_info *range, + struct knav_queue_inst *inst); + int (*set_notify)(struct knav_range_info *range, + struct knav_queue_inst *inst, bool enabled); +}; + +struct knav_irq_info { + int irq; + u32 cpu_map; +}; + +struct knav_range_info { + const char *name; + struct knav_device *kdev; + unsigned queue_base; + unsigned num_queues; + void *queue_base_inst; + unsigned flags; + struct list_head list; + struct knav_range_ops *ops; + struct knav_acc_info acc_info; + struct knav_acc_channel *acc; + unsigned num_irqs; + struct knav_irq_info irqs[RANGE_MAX_IRQS]; +}; + +#define RANGE_RESERVED BIT(0) +#define RANGE_HAS_IRQ BIT(1) +#define RANGE_HAS_ACCUMULATOR BIT(2) +#define RANGE_MULTI_QUEUE BIT(3) + +#define for_each_region(kdev, region) \ + list_for_each_entry(region, &kdev->regions, list) + +#define first_region(kdev) \ + list_first_entry(&kdev->regions, \ + struct knav_region, list) + +#define for_each_queue_range(kdev, range) \ + list_for_each_entry(range, &kdev->queue_ranges, list) + +#define first_queue_range(kdev) \ + list_first_entry(&kdev->queue_ranges, \ + struct knav_range_info, list) + +#define for_each_pool(kdev, pool) \ + list_for_each_entry(pool, &kdev->pools, list) + +#define for_each_pdsp(kdev, pdsp) \ + list_for_each_entry(pdsp, &kdev->pdsps, list) + +#define for_each_qmgr(kdev, qmgr) \ + list_for_each_entry(qmgr, &kdev->qmgrs, list) + +static inline struct knav_pdsp_info * +knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id) +{ + struct knav_pdsp_info *pdsp; + + for_each_pdsp(kdev, pdsp) + if (pdsp_id == pdsp->id) + return pdsp; + return NULL; +} + +extern int knav_init_acc_range(struct knav_device *kdev, + struct device_node *node, + struct knav_range_info *range); +extern void knav_queue_notify(struct knav_queue_inst *inst); + +#endif /* __KNAV_QMSS_H__ */ diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c new file mode 100644 index 000000000000..6fbfde6e748f --- /dev/null +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -0,0 +1,591 @@ +/* + * Keystone accumulator queue manager + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Author: Sandeep Nair <sandeep_n@ti.com> + * Cyril Chemparathy <cyril@ti.com> + * Santosh Shilimkar <santosh.shilimkar@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/soc/ti/knav_qmss.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/firmware.h> + +#include "knav_qmss.h" + +#define knav_range_offset_to_inst(kdev, range, q) \ + (range->queue_base_inst + (q << kdev->inst_shift)) + +static void __knav_acc_notify(struct knav_range_info *range, + struct knav_acc_channel *acc) +{ + struct knav_device *kdev = range->kdev; + struct knav_queue_inst *inst; + int range_base, queue; + + range_base = kdev->base_id + range->queue_base; + + if (range->flags & RANGE_MULTI_QUEUE) { + for (queue = 0; queue < range->num_queues; queue++) { + inst = knav_range_offset_to_inst(kdev, range, + queue); + if (inst->notify_needed) { + inst->notify_needed = 0; + dev_dbg(kdev->dev, "acc-irq: notifying %d\n", + range_base + queue); + knav_queue_notify(inst); + } + } + } else { + queue = acc->channel - range->acc_info.start_channel; + inst = knav_range_offset_to_inst(kdev, range, queue); + dev_dbg(kdev->dev, "acc-irq: notifying %d\n", + range_base + queue); + knav_queue_notify(inst); + } +} + +static int knav_acc_set_notify(struct knav_range_info *range, + struct knav_queue_inst *kq, + bool enabled) +{ + struct knav_pdsp_info *pdsp = range->acc_info.pdsp; + struct knav_device *kdev = range->kdev; + u32 mask, offset; + + /* + * when enabling, we need to re-trigger an interrupt if we + * have descriptors pending + */ + if (!enabled || atomic_read(&kq->desc_count) <= 0) + return 0; + + kq->notify_needed = 1; + atomic_inc(&kq->acc->retrigger_count); + mask = BIT(kq->acc->channel % 32); + offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel); + dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n", + kq->acc->name); + writel_relaxed(mask, pdsp->intd + offset); + return 0; +} + +static irqreturn_t knav_acc_int_handler(int irq, void *_instdata) +{ + struct knav_acc_channel *acc; + struct knav_queue_inst *kq = NULL; + struct knav_range_info *range; + struct knav_pdsp_info *pdsp; + struct knav_acc_info *info; + struct knav_device *kdev; + + u32 *list, *list_cpu, val, idx, notifies; + int range_base, channel, queue = 0; + dma_addr_t list_dma; + + range = _instdata; + info = &range->acc_info; + kdev = range->kdev; + pdsp = range->acc_info.pdsp; + acc = range->acc; + + range_base = kdev->base_id + range->queue_base; + if ((range->flags & RANGE_MULTI_QUEUE) == 0) { + for (queue = 0; queue < range->num_irqs; queue++) + if (range->irqs[queue].irq == irq) + break; + kq = knav_range_offset_to_inst(kdev, range, queue); + acc += queue; + } + + channel = acc->channel; + list_dma = acc->list_dma[acc->list_index]; + list_cpu = acc->list_cpu[acc->list_index]; + dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n", + channel, acc->list_index, list_cpu, list_dma); + if (atomic_read(&acc->retrigger_count)) { + atomic_dec(&acc->retrigger_count); + __knav_acc_notify(range, acc); + writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); + /* ack the interrupt */ + writel_relaxed(ACC_CHANNEL_INT_BASE + channel, + pdsp->intd + ACC_INTD_OFFSET_EOI); + + return IRQ_HANDLED; + } + + notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); + WARN_ON(!notifies); + dma_sync_single_for_cpu(kdev->dev, list_dma, 
info->list_size, + DMA_FROM_DEVICE); + + for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32)); + list += ACC_LIST_ENTRY_WORDS) { + if (ACC_LIST_ENTRY_WORDS == 1) { + dev_dbg(kdev->dev, + "acc-irq: list %d, entry @%p, %08x\n", + acc->list_index, list, list[0]); + } else if (ACC_LIST_ENTRY_WORDS == 2) { + dev_dbg(kdev->dev, + "acc-irq: list %d, entry @%p, %08x %08x\n", + acc->list_index, list, list[0], list[1]); + } else if (ACC_LIST_ENTRY_WORDS == 4) { + dev_dbg(kdev->dev, + "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n", + acc->list_index, list, list[0], list[1], + list[2], list[3]); + } + + val = list[ACC_LIST_ENTRY_DESC_IDX]; + if (!val) + break; + + if (range->flags & RANGE_MULTI_QUEUE) { + queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16; + if (queue < range_base || + queue >= range_base + range->num_queues) { + dev_err(kdev->dev, + "bad queue %d, expecting %d-%d\n", + queue, range_base, + range_base + range->num_queues); + break; + } + queue -= range_base; + kq = knav_range_offset_to_inst(kdev, range, + queue); + } + + if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) { + atomic_dec(&kq->desc_count); + dev_err(kdev->dev, + "acc-irq: queue %d full, entry dropped\n", + queue + range_base); + continue; + } + + idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK; + kq->descs[idx] = val; + kq->notify_needed = 1; + dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n", + val, idx, queue + range_base); + } + + __knav_acc_notify(range, acc); + memset(list_cpu, 0, info->list_size); + dma_sync_single_for_device(kdev->dev, list_dma, info->list_size, + DMA_TO_DEVICE); + + /* flip to the other list */ + acc->list_index ^= 1; + + /* reset the interrupt counter */ + writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); + + /* ack the interrupt */ + writel_relaxed(ACC_CHANNEL_INT_BASE + channel, + pdsp->intd + ACC_INTD_OFFSET_EOI); + + return IRQ_HANDLED; +} + +int knav_range_setup_acc_irq(struct knav_range_info *range, + int queue, bool enabled) +{ + struct knav_device *kdev = range->kdev; + struct knav_acc_channel *acc; + unsigned long cpu_map; + int ret = 0, irq; + u32 old, new; + + if (range->flags & RANGE_MULTI_QUEUE) { + acc = range->acc; + irq = range->irqs[0].irq; + cpu_map = range->irqs[0].cpu_map; + } else { + acc = range->acc + queue; + irq = range->irqs[queue].irq; + cpu_map = range->irqs[queue].cpu_map; + } + + old = acc->open_mask; + if (enabled) + new = old | BIT(queue); + else + new = old & ~BIT(queue); + acc->open_mask = new; + + dev_dbg(kdev->dev, + "setup-acc-irq: open mask old %08x, new %08x, channel %s\n", + old, new, acc->name); + + if (likely(new == old)) + return 0; + + if (new && !old) { + dev_dbg(kdev->dev, + "setup-acc-irq: requesting %s for channel %s\n", + acc->name, acc->name); + ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, + range); + if (!ret && cpu_map) { + ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (ret) { + dev_warn(range->kdev->dev, + "Failed to set IRQ affinity\n"); + return ret; + } + } + } + + if (old && !new) { + dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n", + acc->name, acc->name); + free_irq(irq, range); + } + + return ret; +} + +static const char *knav_acc_result_str(enum knav_acc_result result) +{ + static const char * const result_str[] = { + [ACC_RET_IDLE] = "idle", + [ACC_RET_SUCCESS] = "success", + [ACC_RET_INVALID_COMMAND] = "invalid command", + [ACC_RET_INVALID_CHANNEL] = "invalid channel", + [ACC_RET_INACTIVE_CHANNEL] = "inactive channel", + 
[ACC_RET_ACTIVE_CHANNEL] = "active channel", + [ACC_RET_INVALID_QUEUE] = "invalid queue", + [ACC_RET_INVALID_RET] = "invalid return code", + }; + + if (result >= ARRAY_SIZE(result_str)) + return result_str[ACC_RET_INVALID_RET]; + else + return result_str[result]; +} + +static enum knav_acc_result +knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp, + struct knav_reg_acc_command *cmd) +{ + u32 result; + + dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n", + cmd->command, cmd->queue_mask, cmd->list_phys, + cmd->queue_num, cmd->timer_config); + + writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config); + writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num); + writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys); + writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask); + writel_relaxed(cmd->command, &pdsp->acc_command->command); + + /* wait for the command to clear */ + do { + result = readl_relaxed(&pdsp->acc_command->command); + } while ((result >> 8) & 0xff); + + return (result >> 24) & 0xff; +} + +static void knav_acc_setup_cmd(struct knav_device *kdev, + struct knav_range_info *range, + struct knav_reg_acc_command *cmd, + int queue) +{ + struct knav_acc_info *info = &range->acc_info; + struct knav_acc_channel *acc; + int queue_base; + u32 queue_mask; + + if (range->flags & RANGE_MULTI_QUEUE) { + acc = range->acc; + queue_base = range->queue_base; + queue_mask = BIT(range->num_queues) - 1; + } else { + acc = range->acc + queue; + queue_base = range->queue_base + queue; + queue_mask = 0; + } + + memset(cmd, 0, sizeof(*cmd)); + cmd->command = acc->channel; + cmd->queue_mask = queue_mask; + cmd->list_phys = acc->list_dma[0]; + cmd->queue_num = info->list_entries << 16; + cmd->queue_num |= queue_base; + + cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18; + if (range->flags & RANGE_MULTI_QUEUE) + cmd->timer_config |= ACC_CFG_MULTI_QUEUE; + cmd->timer_config |= info->pacing_mode << 16; + cmd->timer_config |= info->timer_count; +} + +static void knav_acc_stop(struct knav_device *kdev, + struct knav_range_info *range, + int queue) +{ + struct knav_reg_acc_command cmd; + struct knav_acc_channel *acc; + enum knav_acc_result result; + + acc = range->acc + queue; + + knav_acc_setup_cmd(kdev, range, &cmd, queue); + cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8; + result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); + + dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n", + acc->name, knav_acc_result_str(result)); +} + +static enum knav_acc_result knav_acc_start(struct knav_device *kdev, + struct knav_range_info *range, + int queue) +{ + struct knav_reg_acc_command cmd; + struct knav_acc_channel *acc; + enum knav_acc_result result; + + acc = range->acc + queue; + + knav_acc_setup_cmd(kdev, range, &cmd, queue); + cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8; + result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); + + dev_dbg(kdev->dev, "started acc channel %s, result %s\n", + acc->name, knav_acc_result_str(result)); + + return result; +} + +static int knav_acc_init_range(struct knav_range_info *range) +{ + struct knav_device *kdev = range->kdev; + struct knav_acc_channel *acc; + enum knav_acc_result result; + int queue; + + for (queue = 0; queue < range->num_queues; queue++) { + acc = range->acc + queue; + + knav_acc_stop(kdev, range, queue); + acc->list_index = 0; + result = knav_acc_start(kdev, range, queue); + + if (result != ACC_RET_SUCCESS) + return -EIO; + + if (range->flags & RANGE_MULTI_QUEUE) + return 0; + } + return 0; 
+}
+
+static int knav_acc_init_queue(struct knav_range_info *range,
+ struct knav_queue_inst *kq)
+{
+ unsigned id = kq->id - range->queue_base;
+
+ kq->descs = devm_kzalloc(range->kdev->dev,
+ ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL);
+ if (!kq->descs)
+ return -ENOMEM;
+
+ kq->acc = range->acc;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0)
+ kq->acc += id;
+ return 0;
+}
+
+static int knav_acc_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, true);
+}
+
+static int knav_acc_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, false);
+}
+
+static int knav_acc_free_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct knav_acc_info *info;
+ int channel, channels;
+
+ info = &range->acc_info;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ channels = 1;
+ else
+ channels = range->num_queues;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ if (!acc->list_cpu[0])
+ continue;
+ dma_unmap_single(kdev->dev, acc->list_dma[0],
+ info->mem_size, DMA_BIDIRECTIONAL);
+ free_pages_exact(acc->list_cpu[0], info->mem_size);
+ }
+ devm_kfree(range->kdev->dev, range->acc);
+ return 0;
+}
+
+struct knav_range_ops knav_acc_range_ops = {
+ .set_notify = knav_acc_set_notify,
+ .init_queue = knav_acc_init_queue,
+ .open_queue = knav_acc_open_queue,
+ .close_queue = knav_acc_close_queue,
+ .init_range = knav_acc_init_range,
+ .free_range = knav_acc_free_range,
+};
+
+/**
+ * knav_init_acc_range: Initialise accumulator ranges
+ *
+ * @kdev: qmss device
+ * @node: device node
+ * @range: qmss range information
+ *
+ * Returns 0 on success or a negative error code otherwise
+ */
+int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range)
+{
+ struct knav_acc_channel *acc;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ int ret, channel, channels;
+ int list_size, mem_size;
+ dma_addr_t list_dma;
+ void *list_mem;
+ u32 config[5];
+
+ range->flags |= RANGE_HAS_ACCUMULATOR;
+ info = &range->acc_info;
+
+ ret = of_property_read_u32_array(node, "accumulator", config, 5);
+ if (ret)
+ return ret;
+
+ info->pdsp_id = config[0];
+ info->start_channel = config[1];
+ info->list_entries = config[2];
+ info->pacing_mode = config[3];
+ info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
+
+ if (info->start_channel > ACC_MAX_CHANNEL) {
+ dev_err(kdev->dev, "channel %d invalid for range %s\n",
+ info->start_channel, range->name);
+ return -EINVAL;
+ }
+
+ if (info->pacing_mode > 3) {
+ dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
+ info->pacing_mode, range->name);
+ return -EINVAL;
+ }
+
+ pdsp = knav_find_pdsp(kdev, info->pdsp_id);
+ if (!pdsp) {
+ dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+ info->pdsp_id, range->name);
+ return -EINVAL;
+ }
+
+ info->pdsp = pdsp;
+ channels = range->num_queues;
+ if (of_get_property(node, "multi-queue", NULL)) {
+ range->flags |= RANGE_MULTI_QUEUE;
+ channels = 1;
+ if (range->queue_base & (32 - 1)) {
+ dev_err(kdev->dev,
+ "misaligned multi-queue accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ if (range->num_queues > 32) {
+ dev_err(kdev->dev,
+ "too many queues in accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ }
+
+ /* figure out 
list size */ + list_size = info->list_entries; + list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32); + info->list_size = list_size; + mem_size = PAGE_ALIGN(list_size * 2); + info->mem_size = mem_size; + range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc), + GFP_KERNEL); + if (!range->acc) + return -ENOMEM; + + for (channel = 0; channel < channels; channel++) { + acc = range->acc + channel; + acc->channel = info->start_channel + channel; + + /* allocate memory for the two lists */ + list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA); + if (!list_mem) + return -ENOMEM; + + list_dma = dma_map_single(kdev->dev, list_mem, mem_size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(kdev->dev, list_dma)) { + free_pages_exact(list_mem, mem_size); + return -ENOMEM; + } + + memset(list_mem, 0, mem_size); + dma_sync_single_for_device(kdev->dev, list_dma, mem_size, + DMA_TO_DEVICE); + scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d", + acc->channel); + acc->list_cpu[0] = list_mem; + acc->list_cpu[1] = list_mem + list_size; + acc->list_dma[0] = list_dma; + acc->list_dma[1] = list_dma + list_size; + dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n", + acc->name, acc->channel, list_dma, list_mem); + } + + range->ops = &knav_acc_range_ops; + return 0; +} +EXPORT_SYMBOL_GPL(knav_init_acc_range); diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c new file mode 100644 index 000000000000..0a2c8634c48b --- /dev/null +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -0,0 +1,1816 @@ +/* + * Keystone Queue Manager subsystem driver + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Authors: Sandeep Nair <sandeep_n@ti.com> + * Cyril Chemparathy <cyril@ti.com> + * Santosh Shilimkar <santosh.shilimkar@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+static struct knav_device *kdev;
+static DEFINE_MUTEX(knav_dev_lock);
+
+/* Queue manager register indices in DTS */
+#define KNAV_QUEUE_PEEK_REG_INDEX 0
+#define KNAV_QUEUE_STATUS_REG_INDEX 1
+#define KNAV_QUEUE_CONFIG_REG_INDEX 2
+#define KNAV_QUEUE_REGION_REG_INDEX 3
+#define KNAV_QUEUE_PUSH_REG_INDEX 4
+#define KNAV_QUEUE_POP_REG_INDEX 5
+
+/* PDSP register indices in DTS */
+#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0
+#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
+#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
+#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
+
+#define knav_queue_idx_to_inst(kdev, idx) \
+ (kdev->instances + (idx << kdev->inst_shift))
+
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list)
+
+#define for_each_instance(idx, inst, kdev) \
+ for (idx = 0, inst = kdev->instances; \
+ idx < (kdev)->num_queues_in_use; \
+ idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+
+/**
+ * knav_queue_notify: qmss queue notifier call
+ *
+ * @inst: qmss queue instance, e.g. an accumulator queue
+ */
+void knav_queue_notify(struct knav_queue_inst *inst)
+{
+ struct knav_queue *qh;
+
+ if (!inst)
+ return;
+
+ rcu_read_lock();
+ for_each_handle_rcu(qh, inst) {
+ if (atomic_read(&qh->notifier_enabled) <= 0)
+ continue;
+ if (WARN_ON(!qh->notifier_fn))
+ continue;
+ atomic_inc(&qh->stats.notifies);
+ qh->notifier_fn(qh->notifier_fn_arg);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(knav_queue_notify);
+
+static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
+{
+ struct knav_queue_inst *inst = _instdata;
+
+ knav_queue_notify(inst);
+ return IRQ_HANDLED;
+}
+
+static int knav_queue_setup_irq(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned queue = inst->id - range->queue_base;
+ unsigned long cpu_map;
+ int ret = 0, irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ cpu_map = range->irqs[queue].cpu_map;
+ ret = request_irq(irq, knav_queue_int_handler, 0,
+ inst->irq_name, inst);
+ if (ret)
+ return ret;
+ disable_irq(irq);
+ if (cpu_map) {
+ ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static void knav_queue_free_irq(struct knav_queue_inst *inst)
+{
+ struct knav_range_info *range = inst->range;
+ unsigned queue = inst->id - inst->range->queue_base;
+ int irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, inst);
+ }
+}
+
+static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
+{
+ return !list_empty(&inst->handles);
+}
+
+static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
+{
+ return inst->range->flags & RANGE_RESERVED;
+}
+
+static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
+{
+ struct knav_queue *tmp;
+
+ rcu_read_lock();
+ 
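/* a single handle opened with KNAV_QUEUE_SHARED marks the whole queue shared */
+ 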
for_each_handle_rcu(tmp, inst) { + if (tmp->flags & KNAV_QUEUE_SHARED) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + return false; +} + +static inline bool knav_queue_match_type(struct knav_queue_inst *inst, + unsigned type) +{ + if ((type == KNAV_QUEUE_QPEND) && + (inst->range->flags & RANGE_HAS_IRQ)) { + return true; + } else if ((type == KNAV_QUEUE_ACC) && + (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { + return true; + } else if ((type == KNAV_QUEUE_GP) && + !(inst->range->flags & + (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) { + return true; + } + return false; +} + +static inline struct knav_queue_inst * +knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id) +{ + struct knav_queue_inst *inst; + int idx; + + for_each_instance(idx, inst, kdev) { + if (inst->id == id) + return inst; + } + return NULL; +} + +static inline struct knav_queue_inst *knav_queue_find_by_id(int id) +{ + if (kdev->base_id <= id && + kdev->base_id + kdev->num_queues > id) { + id -= kdev->base_id; + return knav_queue_match_id_to_inst(kdev, id); + } + return NULL; +} + +static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, + const char *name, unsigned flags) +{ + struct knav_queue *qh; + unsigned id; + int ret = 0; + + qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); + if (!qh) + return ERR_PTR(-ENOMEM); + + qh->flags = flags; + qh->inst = inst; + id = inst->id - inst->qmgr->start_queue; + qh->reg_push = &inst->qmgr->reg_push[id]; + qh->reg_pop = &inst->qmgr->reg_pop[id]; + qh->reg_peek = &inst->qmgr->reg_peek[id]; + + /* first opener? */ + if (!knav_queue_is_busy(inst)) { + struct knav_range_info *range = inst->range; + + inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); + if (range->ops && range->ops->open_queue) + ret = range->ops->open_queue(range, inst, flags); + + if (ret) { + devm_kfree(inst->kdev->dev, qh); + return ERR_PTR(ret); + } + } + list_add_tail_rcu(&qh->list, &inst->handles); + return qh; +} + +static struct knav_queue * +knav_queue_open_by_id(const char *name, unsigned id, unsigned flags) +{ + struct knav_queue_inst *inst; + struct knav_queue *qh; + + mutex_lock(&knav_dev_lock); + + qh = ERR_PTR(-ENODEV); + inst = knav_queue_find_by_id(id); + if (!inst) + goto unlock_ret; + + qh = ERR_PTR(-EEXIST); + if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst)) + goto unlock_ret; + + qh = ERR_PTR(-EBUSY); + if ((flags & KNAV_QUEUE_SHARED) && + (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst))) + goto unlock_ret; + + qh = __knav_queue_open(inst, name, flags); + +unlock_ret: + mutex_unlock(&knav_dev_lock); + + return qh; +} + +static struct knav_queue *knav_queue_open_by_type(const char *name, + unsigned type, unsigned flags) +{ + struct knav_queue_inst *inst; + struct knav_queue *qh = ERR_PTR(-EINVAL); + int idx; + + mutex_lock(&knav_dev_lock); + + for_each_instance(idx, inst, kdev) { + if (knav_queue_is_reserved(inst)) + continue; + if (!knav_queue_match_type(inst, type)) + continue; + if (knav_queue_is_busy(inst)) + continue; + qh = __knav_queue_open(inst, name, flags); + goto unlock_ret; + } + +unlock_ret: + mutex_unlock(&knav_dev_lock); + return qh; +} + +static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled) +{ + struct knav_range_info *range = inst->range; + + if (range->ops && range->ops->set_notify) + range->ops->set_notify(range, inst, enabled); +} + +static int knav_queue_enable_notifier(struct knav_queue *qh) +{ + struct knav_queue_inst *inst = qh->inst; + bool first; + + if 
(WARN_ON(!qh->notifier_fn)) + return -EINVAL; + + /* Adjust the per handle notifier count */ + first = (atomic_inc_return(&qh->notifier_enabled) == 1); + if (!first) + return 0; /* nothing to do */ + + /* Now adjust the per instance notifier count */ + first = (atomic_inc_return(&inst->num_notifiers) == 1); + if (first) + knav_queue_set_notify(inst, true); + + return 0; +} + +static int knav_queue_disable_notifier(struct knav_queue *qh) +{ + struct knav_queue_inst *inst = qh->inst; + bool last; + + last = (atomic_dec_return(&qh->notifier_enabled) == 0); + if (!last) + return 0; /* nothing to do */ + + last = (atomic_dec_return(&inst->num_notifiers) == 0); + if (last) + knav_queue_set_notify(inst, false); + + return 0; +} + +static int knav_queue_set_notifier(struct knav_queue *qh, + struct knav_queue_notify_config *cfg) +{ + knav_queue_notify_fn old_fn = qh->notifier_fn; + + if (!cfg) + return -EINVAL; + + if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) + return -ENOTSUPP; + + if (!cfg->fn && old_fn) + knav_queue_disable_notifier(qh); + + qh->notifier_fn = cfg->fn; + qh->notifier_fn_arg = cfg->fn_arg; + + if (cfg->fn && !old_fn) + knav_queue_enable_notifier(qh); + + return 0; +} + +static int knav_gp_set_notify(struct knav_range_info *range, + struct knav_queue_inst *inst, + bool enabled) +{ + unsigned queue; + + if (range->flags & RANGE_HAS_IRQ) { + queue = inst->id - range->queue_base; + if (enabled) + enable_irq(range->irqs[queue].irq); + else + disable_irq_nosync(range->irqs[queue].irq); + } + return 0; +} + +static int knav_gp_open_queue(struct knav_range_info *range, + struct knav_queue_inst *inst, unsigned flags) +{ + return knav_queue_setup_irq(range, inst); +} + +static int knav_gp_close_queue(struct knav_range_info *range, + struct knav_queue_inst *inst) +{ + knav_queue_free_irq(inst); + return 0; +} + +struct knav_range_ops knav_gp_range_ops = { + .set_notify = knav_gp_set_notify, + .open_queue = knav_gp_open_queue, + .close_queue = knav_gp_close_queue, +}; + + +static int knav_queue_get_count(void *qhandle) +{ + struct knav_queue *qh = qhandle; + struct knav_queue_inst *inst = qh->inst; + + return readl_relaxed(&qh->reg_peek[0].entry_count) + + atomic_read(&inst->desc_count); +} + +static void knav_queue_debug_show_instance(struct seq_file *s, + struct knav_queue_inst *inst) +{ + struct knav_device *kdev = inst->kdev; + struct knav_queue *qh; + + if (!knav_queue_is_busy(inst)) + return; + + seq_printf(s, "\tqueue id %d (%s)\n", + kdev->base_id + inst->id, inst->name); + for_each_handle_rcu(qh, inst) { + seq_printf(s, "\t\thandle %p: ", qh); + seq_printf(s, "pushes %8d, ", + atomic_read(&qh->stats.pushes)); + seq_printf(s, "pops %8d, ", + atomic_read(&qh->stats.pops)); + seq_printf(s, "count %8d, ", + knav_queue_get_count(qh)); + seq_printf(s, "notifies %8d, ", + atomic_read(&qh->stats.notifies)); + seq_printf(s, "push errors %8d, ", + atomic_read(&qh->stats.push_errors)); + seq_printf(s, "pop errors %8d\n", + atomic_read(&qh->stats.pop_errors)); + } +} + +static int knav_queue_debug_show(struct seq_file *s, void *v) +{ + struct knav_queue_inst *inst; + int idx; + + mutex_lock(&knav_dev_lock); + seq_printf(s, "%s: %u-%u\n", + dev_name(kdev->dev), kdev->base_id, + kdev->base_id + kdev->num_queues - 1); + for_each_instance(idx, inst, kdev) + knav_queue_debug_show_instance(s, inst); + mutex_unlock(&knav_dev_lock); + + return 0; +} + +static int knav_queue_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, knav_queue_debug_show, 
NULL);
+}
+
+static const struct file_operations knav_queue_debug_ops = {
+ .open = knav_queue_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
+ u32 flags)
+{
+ unsigned long end;
+ u32 val = 0;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ while (time_after(end, jiffies)) {
+ val = readl_relaxed(addr);
+ if (flags)
+ val &= flags;
+ if (!val)
+ break;
+ cpu_relax();
+ }
+ return val ? -ETIMEDOUT : 0;
+}
+
+
+static int knav_queue_flush(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ unsigned id = inst->id - inst->qmgr->start_queue;
+
+ atomic_set(&inst->desc_count, 0);
+ writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
+ return 0;
+}
+
+/**
+ * knav_queue_open() - open a hardware queue
+ * @name - name to give the queue handle
+ * @id - desired queue number if any or specifies the type
+ * of queue
+ * @flags - the following flags are applicable to queues:
+ * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
+ * exclusive by default.
+ * Subsequent attempts to open a shared queue should
+ * also have this flag.
+ *
+ * Returns a handle to the open hardware queue if successful. Use IS_ERR()
+ * to check the returned value for error codes.
+ */
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags)
+{
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+
+ switch (id) {
+ case KNAV_QUEUE_QPEND:
+ case KNAV_QUEUE_ACC:
+ case KNAV_QUEUE_GP:
+ qh = knav_queue_open_by_type(name, id, flags);
+ break;
+
+ default:
+ qh = knav_queue_open_by_id(name, id, flags);
+ break;
+ }
+ return qh;
+}
+EXPORT_SYMBOL_GPL(knav_queue_open);
+
+/**
+ * knav_queue_close() - close a hardware queue handle
+ * @qh - handle to close
+ */
+void knav_queue_close(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ while (atomic_read(&qh->notifier_enabled) > 0)
+ knav_queue_disable_notifier(qh);
+
+ mutex_lock(&knav_dev_lock);
+ list_del_rcu(&qh->list);
+ mutex_unlock(&knav_dev_lock);
+ synchronize_rcu();
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->close_queue)
+ range->ops->close_queue(range, inst);
+ }
+ devm_kfree(inst->kdev->dev, qh);
+}
+EXPORT_SYMBOL_GPL(knav_queue_close);
+
+/**
+ * knav_queue_device_control() - Perform control operations on a queue
+ * @qh - queue handle
+ * @cmd - control commands
+ * @arg - command argument
+ *
+ * Returns a non-negative value (e.g. the queue id or descriptor count for
+ * query commands) on success, negative errno otherwise.
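+ *
+ * A minimal usage sketch (illustrative only; my_notify() and my_ctx are
+ * hypothetical caller-side names):
+ *
+ *	struct knav_queue_notify_config cfg = {
+ *		.fn = my_notify,
+ *		.fn_arg = my_ctx,
+ *	};
+ *	int id = knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0);
+ *	int ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
+ *				(unsigned long)&cfg);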
+ */
+int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_notify_config *cfg;
+ int ret;
+
+ switch ((int)cmd) {
+ case KNAV_QUEUE_GET_ID:
+ ret = qh->inst->kdev->base_id + qh->inst->id;
+ break;
+
+ case KNAV_QUEUE_FLUSH:
+ ret = knav_queue_flush(qh);
+ break;
+
+ case KNAV_QUEUE_SET_NOTIFIER:
+ cfg = (void *)arg;
+ ret = knav_queue_set_notifier(qh, cfg);
+ break;
+
+ case KNAV_QUEUE_ENABLE_NOTIFY:
+ ret = knav_queue_enable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_DISABLE_NOTIFY:
+ ret = knav_queue_disable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_GET_COUNT:
+ ret = knav_queue_get_count(qh);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(knav_queue_device_control);
+
+
+
+/**
+ * knav_queue_push() - push data (or descriptor) to the tail of a queue
+ * @qh - hardware queue handle
+ * @dma - DMA address of the data (or descriptor) to push
+ * @size - size of data to push
+ * @flags - can be used to pass additional information
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags)
+{
+ struct knav_queue *qh = qhandle;
+ u32 val;
+
+ val = (u32)dma | ((size / 16) - 1);
+ writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
+
+ atomic_inc(&qh->stats.pushes);
+ return 0;
+}
+
+/**
+ * knav_queue_pop() - pop data (or descriptor) from the head of a queue
+ * @qh - hardware queue handle
+ * @size - (optional) size of the data popped.
+ *
+ * Returns a DMA address on success, 0 on failure.
+ */
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+ dma_addr_t dma;
+ u32 val, idx;
+
+ /* are we accumulated? 
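(i.e. an accumulator-backed queue whose
+ * descriptors were already drained into inst->descs by the list
+ * interrupt handler?)
+ 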
*/
+ if (inst->descs) {
+ if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
+ atomic_inc(&inst->desc_count);
+ return 0;
+ }
+ idx = atomic_inc_return(&inst->desc_head);
+ idx &= ACC_DESCS_MASK;
+ val = inst->descs[idx];
+ } else {
+ val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
+ if (unlikely(!val))
+ return 0;
+ }
+
+ dma = val & DESC_PTR_MASK;
+ if (size)
+ *size = ((val & DESC_SIZE_MASK) + 1) * 16;
+
+ atomic_inc(&qh->stats.pops);
+ return dma;
+}
+
+/* carve out descriptors and push into queue */
+static void kdesc_fill_pool(struct knav_pool *pool)
+{
+ struct knav_region *region;
+ int i;
+
+ region = pool->region;
+ pool->desc_size = region->desc_size;
+ for (i = 0; i < pool->num_desc; i++) {
+ int index = pool->region_offset + i;
+ dma_addr_t dma_addr;
+ unsigned dma_size;
+ dma_addr = region->dma_start + (region->desc_size * index);
+ dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
+ dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
+ DMA_TO_DEVICE);
+ knav_queue_push(pool->queue, dma_addr, dma_size, 0);
+ }
+}
+
+/* pop out descriptors and close the queue */
+static void kdesc_empty_pool(struct knav_pool *pool)
+{
+ dma_addr_t dma;
+ unsigned size;
+ void *desc;
+ int i;
+
+ if (!pool->queue)
+ return;
+
+ for (i = 0;; i++) {
+ dma = knav_queue_pop(pool->queue, &size);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ if (!desc) {
+ dev_dbg(pool->kdev->dev,
+ "couldn't unmap desc, continuing\n");
+ continue;
+ }
+ }
+ WARN_ON(i != pool->num_desc);
+ knav_queue_close(pool->queue);
+}
+
+
+/* Get the DMA address of a descriptor */
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->dma_start + (virt - pool->region->virt_start);
+}
+
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->virt_start + (dma - pool->region->dma_start);
+}
+
+/**
+ * knav_pool_create() - Create a pool of descriptors
+ * @name - name to give the pool handle
+ * @num_desc - number of descriptors in the pool
+ * @region_id - QMSS region id from which the descriptors are to be
+ * allocated.
+ *
+ * Returns a pool handle on success.
+ * Use IS_ERR_OR_NULL() to identify error values on return. 
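+ *
+ * A usage sketch (illustrative only; the pool name, descriptor count and
+ * region id are made-up values):
+ *
+ *	void *pool = knav_pool_create("rx-pool", 1024, 0);
+ *	if (IS_ERR_OR_NULL(pool))
+ *		return -ENODEV;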
+ */
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id)
+{
+ struct knav_region *reg_itr, *region = NULL;
+ struct knav_pool *pool, *pi;
+ struct list_head *node;
+ unsigned last_offset;
+ bool slot_found;
+ int ret;
+
+ if (!kdev->dev)
+ return ERR_PTR(-ENODEV);
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating pool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_region(kdev, reg_itr) {
+ if (reg_itr->id != region_id)
+ continue;
+ region = reg_itr;
+ break;
+ }
+
+ if (!region) {
+ dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR_OR_NULL(pool->queue)) {
+ dev_err(kdev->dev,
+ "failed to open queue for pool(%s), error %ld\n",
+ name, PTR_ERR(pool->queue));
+ ret = PTR_ERR(pool->queue);
+ goto err;
+ }
+
+ pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
+ pool->kdev = kdev;
+ pool->dev = kdev->dev;
+
+ mutex_lock(&knav_dev_lock);
+
+ if (num_desc > (region->num_desc - region->used_desc)) {
+ dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
+ region_id, name);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /* Region maintains a sorted (by region offset) list of pools;
+ * use the first free slot which is large enough to accommodate
+ * the request
+ */
+ last_offset = 0;
+ slot_found = false;
+ node = &region->pools;
+ list_for_each_entry(pi, &region->pools, region_inst) {
+ if ((pi->region_offset - last_offset) >= num_desc) {
+ slot_found = true;
+ break;
+ }
+ last_offset = pi->region_offset + pi->num_desc;
+ }
+ node = &pi->region_inst;
+
+ if (slot_found) {
+ pool->region = region;
+ pool->num_desc = num_desc;
+ pool->region_offset = last_offset;
+ region->used_desc += num_desc;
+ list_add_tail(&pool->list, &kdev->pools);
+ list_add_tail(&pool->region_inst, node);
+ } else {
+ dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
+ name, region_id);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ mutex_unlock(&knav_dev_lock);
+ kdesc_fill_pool(pool);
+ return pool;
+
+err_unlock:
+ mutex_unlock(&knav_dev_lock);
+err:
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(knav_pool_create);
+
+/**
+ * knav_pool_destroy() - Free a pool of descriptors
+ * @pool - pool handle
+ */
+void knav_pool_destroy(void *ph)
+{
+ struct knav_pool *pool = ph;
+
+ if (!pool)
+ return;
+
+ if (!pool->region)
+ return;
+
+ kdesc_empty_pool(pool);
+ mutex_lock(&knav_dev_lock);
+
+ pool->region->used_desc -= pool->num_desc;
+ list_del(&pool->region_inst);
+ list_del(&pool->list);
+
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+}
+EXPORT_SYMBOL_GPL(knav_pool_destroy);
+
+
+/**
+ * knav_pool_desc_get() - Get a descriptor from the pool
+ * @pool - pool handle
+ *
+ * Returns descriptor from the pool. 
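+ * Returns ERR_PTR(-ENOMEM) when the pool is empty.
+ *
+ * A get/put sketch (illustrative only):
+ *
+ *	void *desc = knav_pool_desc_get(pool);
+ *	if (IS_ERR(desc))
+ *		return -ENOMEM;
+ *	... fill in the descriptor ...
+ *	knav_pool_desc_put(pool, desc);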
+ */
+void *knav_pool_desc_get(void *ph)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ unsigned size;
+ void *data;
+
+ dma = knav_queue_pop(pool->queue, &size);
+ if (unlikely(!dma))
+ return ERR_PTR(-ENOMEM);
+ data = knav_pool_desc_dma_to_virt(pool, dma);
+ return data;
+}
+
+/**
+ * knav_pool_desc_put() - return a descriptor to the pool
+ * @pool - pool handle
+ * @desc - descriptor to return
+ */
+void knav_pool_desc_put(void *ph, void *desc)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ dma = knav_pool_desc_virt_to_dma(pool, desc);
+ knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
+}
+
+/**
+ * knav_pool_desc_map() - Map descriptor for DMA transfer
+ * @pool - pool handle
+ * @desc - address of descriptor to map
+ * @size - size of descriptor to map
+ * @dma - DMA address return pointer
+ * @dma_sz - adjusted size return pointer
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz)
+{
+ struct knav_pool *pool = ph;
+ *dma = knav_pool_desc_virt_to_dma(pool, desc);
+ size = min(size, pool->region->desc_size);
+ size = ALIGN(size, SMP_CACHE_BYTES);
+ *dma_sz = size;
+ dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
+
+ /* Ensure the descriptor reaches memory */
+ __iowmb();
+
+ return 0;
+}
+
+/**
+ * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
+ * @pool - pool handle
+ * @dma - DMA address of descriptor to unmap
+ * @dma_sz - size of descriptor to unmap
+ *
+ * Returns descriptor address on success. Use IS_ERR_OR_NULL() to identify
+ * error values on return.
+ */
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
+{
+ struct knav_pool *pool = ph;
+ unsigned desc_sz;
+ void *desc;
+
+ desc_sz = min(dma_sz, pool->region->desc_size);
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
+ prefetch(desc);
+ return desc;
+}
+
+/**
+ * knav_pool_count() - Get the number of descriptors in pool.
+ * @pool - pool handle
+ * Returns number of elements in the pool.
+ */
+int knav_pool_count(void *ph)
+{
+ struct knav_pool *pool = ph;
+ return knav_queue_get_count(pool->queue);
+}
+
+static void knav_queue_setup_region(struct knav_device *kdev,
+ struct knav_region *region)
+{
+ unsigned hw_num_desc, hw_desc_size, size;
+ struct knav_reg_region __iomem *regs;
+ struct knav_qmgr_info *qmgr;
+ struct knav_pool *pool;
+ int id = region->id;
+ struct page *page;
+
+ /* unused region? */
+ if (!region->num_desc) {
+ dev_warn(kdev->dev, "unused region %s\n", region->name);
+ return;
+ }
+
+ /* get hardware descriptor value */
+ hw_num_desc = ilog2(region->num_desc - 1) + 1;
+
+ /* did we force fit ourselves into nothingness? 
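(the queue manager needs
+ * at least 32 descriptors in a region)
+ 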
*/ + if (region->num_desc < 32) { + region->num_desc = 0; + dev_warn(kdev->dev, "too few descriptors in region %s\n", + region->name); + return; + } + + size = region->num_desc * region->desc_size; + region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA | + GFP_DMA32); + if (!region->virt_start) { + region->num_desc = 0; + dev_err(kdev->dev, "memory alloc failed for region %s\n", + region->name); + return; + } + region->virt_end = region->virt_start + size; + page = virt_to_page(region->virt_start); + + region->dma_start = dma_map_page(kdev->dev, page, 0, size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(kdev->dev, region->dma_start)) { + dev_err(kdev->dev, "dma map failed for region %s\n", + region->name); + goto fail; + } + region->dma_end = region->dma_start + size; + + pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); + if (!pool) { + dev_err(kdev->dev, "out of memory allocating dummy pool\n"); + goto fail; + } + pool->num_desc = 0; + pool->region_offset = region->num_desc; + list_add(&pool->region_inst, ®ion->pools); + + dev_dbg(kdev->dev, + "region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n", + region->name, id, region->desc_size, region->num_desc, + region->link_index, region->dma_start, region->dma_end, + region->virt_start, region->virt_end); + + hw_desc_size = (region->desc_size / 16) - 1; + hw_num_desc -= 5; + + for_each_qmgr(kdev, qmgr) { + regs = qmgr->reg_region + id; + writel_relaxed(region->dma_start, ®s->base); + writel_relaxed(region->link_index, ®s->start_index); + writel_relaxed(hw_desc_size << 16 | hw_num_desc, + ®s->size_count); + } + return; + +fail: + if (region->dma_start) + dma_unmap_page(kdev->dev, region->dma_start, size, + DMA_BIDIRECTIONAL); + if (region->virt_start) + free_pages_exact(region->virt_start, size); + region->num_desc = 0; + return; +} + +static const char *knav_queue_find_name(struct device_node *node) +{ + const char *name; + + if (of_property_read_string(node, "label", &name) < 0) + name = node->name; + if (!name) + name = "unknown"; + return name; +} + +static int knav_queue_setup_regions(struct knav_device *kdev, + struct device_node *regions) +{ + struct device *dev = kdev->dev; + struct knav_region *region; + struct device_node *child; + u32 temp[2]; + int ret; + + for_each_child_of_node(regions, child) { + region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); + if (!region) { + dev_err(dev, "out of memory allocating region\n"); + return -ENOMEM; + } + + region->name = knav_queue_find_name(child); + of_property_read_u32(child, "id", ®ion->id); + ret = of_property_read_u32_array(child, "region-spec", temp, 2); + if (!ret) { + region->num_desc = temp[0]; + region->desc_size = temp[1]; + } else { + dev_err(dev, "invalid region info %s\n", region->name); + devm_kfree(dev, region); + continue; + } + + if (!of_get_property(child, "link-index", NULL)) { + dev_err(dev, "No link info for %s\n", region->name); + devm_kfree(dev, region); + continue; + } + ret = of_property_read_u32(child, "link-index", + ®ion->link_index); + if (ret) { + dev_err(dev, "link index not found for %s\n", + region->name); + devm_kfree(dev, region); + continue; + } + + INIT_LIST_HEAD(®ion->pools); + list_add_tail(®ion->list, &kdev->regions); + } + if (list_empty(&kdev->regions)) { + dev_err(dev, "no valid region information found\n"); + return -ENODEV; + } + + /* Next, we run through the regions and set things up */ + for_each_region(kdev, region) + knav_queue_setup_region(kdev, region); + + return 0; +} + +static int 
knav_get_link_ram(struct knav_device *kdev,
+ const char *name,
+ struct knav_link_ram_block *block)
+{
+ struct platform_device *pdev = to_platform_device(kdev->dev);
+ struct device_node *node = pdev->dev.of_node;
+ u32 temp[2];
+
+ /*
+ * Note: link ram resources are specified in "entry" sized units. In
+ * reality, although entries are ~40bits in hardware, we treat them as
+ * 64-bit entities here.
+ *
+ * For example, to specify the internal link ram for Keystone-I class
+ * devices, we would set the linkram0 resource to 0x80000-0x83fff.
+ *
+ * This gets a bit weird when other link rams are used. For example,
+ * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
+ * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
+ * which accounts for 64-bits per entry, for 16K entries.
+ */
+ if (!of_property_read_u32_array(node, name, temp, 2)) {
+ if (temp[0]) {
+ /*
+ * queue_base specified => using internal or onchip
+ * link ram. WARNING - we do not "reserve" this block
+ */
+ block->phys = (dma_addr_t)temp[0];
+ block->virt = NULL;
+ block->size = temp[1];
+ } else {
+ block->size = temp[1];
+ /* queue_base not specified => allocate requested size */
+ block->virt = dmam_alloc_coherent(kdev->dev,
+ 8 * block->size, &block->phys,
+ GFP_KERNEL);
+ if (!block->virt) {
+ dev_err(kdev->dev, "failed to alloc linkram\n");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int knav_queue_setup_link_ram(struct knav_device *kdev)
+{
+ struct knav_link_ram_block *block;
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ block = &kdev->link_rams[0];
+ dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n",
+ block->phys, block->virt, block->size);
+ writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0);
+ writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);
+
+ block++;
+ if (!block->size)
+ return 0;
+
+ dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
+ block->phys, block->virt, block->size);
+ writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1);
+ }
+
+ return 0;
+}
+
+static int knav_setup_queue_range(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device *dev = kdev->dev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 temp[2], start, end, id, index;
+ int ret, i;
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ dev_err(dev, "out of memory allocating range\n");
+ return -ENOMEM;
+ }
+
+ range->kdev = kdev;
+ range->name = knav_queue_find_name(node);
+ ret = of_property_read_u32_array(node, "qrange", temp, 2);
+ if (!ret) {
+ range->queue_base = temp[0] - kdev->base_id;
+ range->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid queue range %s\n", range->name);
+ devm_kfree(dev, range);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RANGE_MAX_IRQS; i++) {
+ struct of_phandle_args oirq;
+
+ if (of_irq_parse_one(node, i, &oirq))
+ break;
+
+ range->irqs[i].irq = irq_create_of_mapping(&oirq);
+ if (range->irqs[i].irq == IRQ_NONE)
+ break;
+
+ range->num_irqs++;
+
+ if (oirq.args_count == 3)
+ range->irqs[i].cpu_map =
+ (oirq.args[2] & 0x0000ff00) >> 8;
+ }
+
+ range->num_irqs = min(range->num_irqs, range->num_queues);
+ if (range->num_irqs)
+ range->flags |= RANGE_HAS_IRQ;
+
+ if (of_get_property(node, "qalloc-by-id", NULL))
+ range->flags |= RANGE_RESERVED;
+
+ if (of_get_property(node, "accumulator", NULL)) {
+ ret = knav_init_acc_range(kdev, node, range);
+ if (ret < 0) {
+ devm_kfree(dev, range);
+ return 
ret; + } + } else { + range->ops = &knav_gp_range_ops; + } + + /* set threshold to 1, and flush out the queues */ + for_each_qmgr(kdev, qmgr) { + start = max(qmgr->start_queue, range->queue_base); + end = min(qmgr->start_queue + qmgr->num_queues, + range->queue_base + range->num_queues); + for (id = start; id < end; id++) { + index = id - qmgr->start_queue; + writel_relaxed(THRESH_GTE | 1, + &qmgr->reg_peek[index].ptr_size_thresh); + writel_relaxed(0, + &qmgr->reg_push[index].ptr_size_thresh); + } + } + + list_add_tail(&range->list, &kdev->queue_ranges); + dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n", + range->name, range->queue_base, + range->queue_base + range->num_queues - 1, + range->num_irqs, + (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "", + (range->flags & RANGE_RESERVED) ? ", reserved" : "", + (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : ""); + kdev->num_queues_in_use += range->num_queues; + return 0; +} + +static int knav_setup_queue_pools(struct knav_device *kdev, + struct device_node *queue_pools) +{ + struct device_node *type, *range; + int ret; + + for_each_child_of_node(queue_pools, type) { + for_each_child_of_node(type, range) { + ret = knav_setup_queue_range(kdev, range); + /* return value ignored, we init the rest... */ + } + } + + /* ... and barf if they all failed! */ + if (list_empty(&kdev->queue_ranges)) { + dev_err(kdev->dev, "no valid queue range found\n"); + return -ENODEV; + } + return 0; +} + +static void knav_free_queue_range(struct knav_device *kdev, + struct knav_range_info *range) +{ + if (range->ops && range->ops->free_range) + range->ops->free_range(range); + list_del(&range->list); + devm_kfree(kdev->dev, range); +} + +static void knav_free_queue_ranges(struct knav_device *kdev) +{ + struct knav_range_info *range; + + for (;;) { + range = first_queue_range(kdev); + if (!range) + break; + knav_free_queue_range(kdev, range); + } +} + +static void knav_queue_free_regions(struct knav_device *kdev) +{ + struct knav_region *region; + struct knav_pool *pool; + unsigned size; + + for (;;) { + region = first_region(kdev); + if (!region) + break; + list_for_each_entry(pool, ®ion->pools, region_inst) + knav_pool_destroy(pool); + + size = region->virt_end - region->virt_start; + if (size) + free_pages_exact(region->virt_start, size); + list_del(®ion->list); + devm_kfree(kdev->dev, region); + } +} + +static void __iomem *knav_queue_map_reg(struct knav_device *kdev, + struct device_node *node, int index) +{ + struct resource res; + void __iomem *regs; + int ret; + + ret = of_address_to_resource(node, index, &res); + if (ret) { + dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n", + node->name, index); + return ERR_PTR(ret); + } + + regs = devm_ioremap_resource(kdev->dev, &res); + if (IS_ERR(regs)) + dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n", + index, node->name); + return regs; +} + +static int knav_queue_init_qmgrs(struct knav_device *kdev, + struct device_node *qmgrs) +{ + struct device *dev = kdev->dev; + struct knav_qmgr_info *qmgr; + struct device_node *child; + u32 temp[2]; + int ret; + + for_each_child_of_node(qmgrs, child) { + qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL); + if (!qmgr) { + dev_err(dev, "out of memory allocating qmgr\n"); + return -ENOMEM; + } + + ret = of_property_read_u32_array(child, "managed-queues", + temp, 2); + if (!ret) { + qmgr->start_queue = temp[0]; + qmgr->num_queues = temp[1]; + } else { + dev_err(dev, "invalid qmgr queue range\n"); + devm_kfree(dev, qmgr); 
+ continue; + } + + dev_info(dev, "qmgr start queue %d, number of queues %d\n", + qmgr->start_queue, qmgr->num_queues); + + qmgr->reg_peek = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PEEK_REG_INDEX); + qmgr->reg_status = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_STATUS_REG_INDEX); + qmgr->reg_config = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_CONFIG_REG_INDEX); + qmgr->reg_region = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_REGION_REG_INDEX); + qmgr->reg_push = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PUSH_REG_INDEX); + qmgr->reg_pop = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_POP_REG_INDEX); + + if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) || + IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) || + IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) { + dev_err(dev, "failed to map qmgr regs\n"); + if (!IS_ERR(qmgr->reg_peek)) + devm_iounmap(dev, qmgr->reg_peek); + if (!IS_ERR(qmgr->reg_status)) + devm_iounmap(dev, qmgr->reg_status); + if (!IS_ERR(qmgr->reg_config)) + devm_iounmap(dev, qmgr->reg_config); + if (!IS_ERR(qmgr->reg_region)) + devm_iounmap(dev, qmgr->reg_region); + if (!IS_ERR(qmgr->reg_push)) + devm_iounmap(dev, qmgr->reg_push); + if (!IS_ERR(qmgr->reg_pop)) + devm_iounmap(dev, qmgr->reg_pop); + devm_kfree(dev, qmgr); + continue; + } + + list_add_tail(&qmgr->list, &kdev->qmgrs); + dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n", + qmgr->start_queue, qmgr->num_queues, + qmgr->reg_peek, qmgr->reg_status, + qmgr->reg_config, qmgr->reg_region, + qmgr->reg_push, qmgr->reg_pop); + } + return 0; +} + +static int knav_queue_init_pdsps(struct knav_device *kdev, + struct device_node *pdsps) +{ + struct device *dev = kdev->dev; + struct knav_pdsp_info *pdsp; + struct device_node *child; + int ret; + + for_each_child_of_node(pdsps, child) { + pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL); + if (!pdsp) { + dev_err(dev, "out of memory allocating pdsp\n"); + return -ENOMEM; + } + pdsp->name = knav_queue_find_name(child); + ret = of_property_read_string(child, "firmware", + &pdsp->firmware); + if (ret < 0 || !pdsp->firmware) { + dev_err(dev, "unknown firmware for pdsp %s\n", + pdsp->name); + devm_kfree(dev, pdsp); + continue; + } + dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name, + pdsp->firmware); + + pdsp->iram = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PDSP_IRAM_REG_INDEX); + pdsp->regs = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PDSP_REGS_REG_INDEX); + pdsp->intd = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PDSP_INTD_REG_INDEX); + pdsp->command = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_PDSP_CMD_REG_INDEX); + + if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) || + IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) { + dev_err(dev, "failed to map pdsp %s regs\n", + pdsp->name); + if (!IS_ERR(pdsp->command)) + devm_iounmap(dev, pdsp->command); + if (!IS_ERR(pdsp->iram)) + devm_iounmap(dev, pdsp->iram); + if (!IS_ERR(pdsp->regs)) + devm_iounmap(dev, pdsp->regs); + if (!IS_ERR(pdsp->intd)) + devm_iounmap(dev, pdsp->intd); + devm_kfree(dev, pdsp); + continue; + } + of_property_read_u32(child, "id", &pdsp->id); + list_add_tail(&pdsp->list, &kdev->pdsps); + dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n", + pdsp->name, pdsp->command, pdsp->iram, pdsp->regs, + pdsp->intd, pdsp->firmware); + } + return 0; +} + +static int knav_queue_stop_pdsp(struct knav_device *kdev, + struct knav_pdsp_info *pdsp) +{ + 
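/* clear the enable bit, then wait for the PDSP to stop running */
+ 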
u32 val, timeout = 1000; + int ret; + + val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE; + writel_relaxed(val, &pdsp->regs->control); + ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout, + PDSP_CTRL_RUNNING); + if (ret < 0) { + dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name); + return ret; + } + return 0; +} + +static int knav_queue_load_pdsp(struct knav_device *kdev, + struct knav_pdsp_info *pdsp) +{ + int i, ret, fwlen; + const struct firmware *fw; + u32 *fwdata; + + ret = request_firmware(&fw, pdsp->firmware, kdev->dev); + if (ret) { + dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n", + pdsp->firmware, pdsp->name); + return ret; + } + writel_relaxed(pdsp->id + 1, pdsp->command + 0x18); + /* download the firmware */ + fwdata = (u32 *)fw->data; + fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32); + for (i = 0; i < fwlen; i++) + writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i); + + release_firmware(fw); + return 0; +} + +static int knav_queue_start_pdsp(struct knav_device *kdev, + struct knav_pdsp_info *pdsp) +{ + u32 val, timeout = 1000; + int ret; + + /* write a command for sync */ + writel_relaxed(0xffffffff, pdsp->command); + while (readl_relaxed(pdsp->command) != 0xffffffff) + cpu_relax(); + + /* soft reset the PDSP */ + val = readl_relaxed(&pdsp->regs->control); + val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET); + writel_relaxed(val, &pdsp->regs->control); + + /* enable pdsp */ + val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE; + writel_relaxed(val, &pdsp->regs->control); + + /* wait for command register to clear */ + ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0); + if (ret < 0) { + dev_err(kdev->dev, + "timed out on pdsp %s command register wait\n", + pdsp->name); + return ret; + } + return 0; +} + +static void knav_queue_stop_pdsps(struct knav_device *kdev) +{ + struct knav_pdsp_info *pdsp; + + /* disable all pdsps */ + for_each_pdsp(kdev, pdsp) + knav_queue_stop_pdsp(kdev, pdsp); +} + +static int knav_queue_start_pdsps(struct knav_device *kdev) +{ + struct knav_pdsp_info *pdsp; + int ret; + + knav_queue_stop_pdsps(kdev); + /* now load them all */ + for_each_pdsp(kdev, pdsp) { + ret = knav_queue_load_pdsp(kdev, pdsp); + if (ret < 0) + return ret; + } + + for_each_pdsp(kdev, pdsp) { + ret = knav_queue_start_pdsp(kdev, pdsp); + WARN_ON(ret); + } + return 0; +} + +static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id) +{ + struct knav_qmgr_info *qmgr; + + for_each_qmgr(kdev, qmgr) { + if ((id >= qmgr->start_queue) && + (id < qmgr->start_queue + qmgr->num_queues)) + return qmgr; + } + return NULL; +} + +static int knav_queue_init_queue(struct knav_device *kdev, + struct knav_range_info *range, + struct knav_queue_inst *inst, + unsigned id) +{ + char irq_name[KNAV_NAME_SIZE]; + inst->qmgr = knav_find_qmgr(id); + if (!inst->qmgr) + return -1; + + INIT_LIST_HEAD(&inst->handles); + inst->kdev = kdev; + inst->range = range; + inst->irq_num = -1; + inst->id = id; + scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id); + inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); + + if (range->ops && range->ops->init_queue) + return range->ops->init_queue(range, inst); + else + return 0; +} + +static int knav_queue_init_queues(struct knav_device *kdev) +{ + struct knav_range_info *range; + int size, id, base_idx; + int idx = 0, ret = 0; + + /* how much do we need for instance data? 
*/
+	size = sizeof(struct knav_queue_inst);
+
+	/* round this up to a power of 2, keep the index to instance
+	 * arithmetic fast.
+	 */
+	kdev->inst_shift = order_base_2(size);
+	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
+	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
+	if (!kdev->instances)
+		return -ENOMEM;
+
+	for_each_queue_range(kdev, range) {
+		if (range->ops && range->ops->init_range)
+			range->ops->init_range(range);
+		base_idx = idx;
+		for (id = range->queue_base;
+		     id < range->queue_base + range->num_queues; id++, idx++) {
+			ret = knav_queue_init_queue(kdev, range,
+				knav_queue_idx_to_inst(kdev, idx), id);
+			if (ret < 0)
+				return ret;
+		}
+		range->queue_base_inst =
+			knav_queue_idx_to_inst(kdev, base_idx);
+	}
+	return 0;
+}
+
+static int knav_queue_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
+	struct device *dev = &pdev->dev;
+	u32 temp[2];
+	int ret;
+
+	if (!node) {
+		dev_err(dev, "device tree info unavailable\n");
+		return -ENODEV;
+	}
+
+	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
+	if (!kdev) {
+		dev_err(dev, "memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, kdev);
+	kdev->dev = dev;
+	INIT_LIST_HEAD(&kdev->queue_ranges);
+	INIT_LIST_HEAD(&kdev->qmgrs);
+	INIT_LIST_HEAD(&kdev->pools);
+	INIT_LIST_HEAD(&kdev->regions);
+	INIT_LIST_HEAD(&kdev->pdsps);
+
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable QMSS\n");
+		pm_runtime_put_noidle(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+		return ret;
+	}
+
+	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
+		dev_err(dev, "queue-range not specified\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	kdev->base_id = temp[0];
+	kdev->num_queues = temp[1];
+
+	/* Initialize queue managers using device tree configuration */
+	qmgrs = of_get_child_by_name(node, "qmgrs");
+	if (!qmgrs) {
+		dev_err(dev, "queue manager info not specified\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	ret = knav_queue_init_qmgrs(kdev, qmgrs);
+	of_node_put(qmgrs);
+	if (ret)
+		goto err;
+
+	/* get pdsp configuration values from device tree */
+	pdsps = of_get_child_by_name(node, "pdsps");
+	if (pdsps) {
+		ret = knav_queue_init_pdsps(kdev, pdsps);
+		of_node_put(pdsps);
+		if (ret)
+			goto err;
+
+		ret = knav_queue_start_pdsps(kdev);
+		if (ret)
+			goto err;
+	}
+
+	/* get usable queue range values from device tree */
+	queue_pools = of_get_child_by_name(node, "queue-pools");
+	if (!queue_pools) {
+		dev_err(dev, "queue-pools not specified\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	ret = knav_setup_queue_pools(kdev, queue_pools);
+	of_node_put(queue_pools);
+	if (ret)
+		goto err;
+
+	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
+	if (ret) {
+		dev_err(kdev->dev, "could not setup linking ram\n");
+		goto err;
+	}
+
+	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
+	if (ret) {
+		/*
+		 * nothing really, we have one linking ram already, so we just
+		 * live within our means
+		 */
+	}
+
+	ret = knav_queue_setup_link_ram(kdev);
+	if (ret)
+		goto err;
+
+	regions = of_get_child_by_name(node, "descriptor-regions");
+	if (!regions) {
+		dev_err(dev, "descriptor-regions not specified\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	ret = knav_queue_setup_regions(kdev, regions);
+	of_node_put(regions);
+	if (ret)
+		goto err;
+
+	ret = knav_queue_init_queues(kdev);
+	if (ret < 0) {
+		dev_err(dev, "hwqueue initialization failed\n");
+		goto err;
+	}
+
+	debugfs_create_file("qmss", S_IFREG |
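/* [Illustrative note, not part of this patch: the two NULL arguments in
 * the debugfs_create_file() call being assembled here are the parent
 * dentry (NULL places the file in the debugfs root) and the private data
 * pointer.  knav_queue_debug_ops is defined earlier in this file and is
 * not shown here; such debugfs attributes are conventionally backed by
 * seq_file boilerplate of roughly this shape, with all names below
 * assumed for illustration:
 *
 *	static int example_debug_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, example_debug_show, NULL);
 *	}
 *
 *	static const struct file_operations example_debug_ops = {
 *		.open		= example_debug_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= single_release,
 *	};
 * ]
 */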
S_IRUGO, NULL, NULL,
+			    &knav_queue_debug_ops);
+	return 0;
+
+err:
+	knav_queue_stop_pdsps(kdev);
+	knav_queue_free_regions(kdev);
+	knav_free_queue_ranges(kdev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int knav_queue_remove(struct platform_device *pdev)
+{
+	/* TODO: Free resources */
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id keystone_qmss_of_match[] = {
+	{ .compatible = "ti,keystone-navigator-qmss", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
+
+static struct platform_driver keystone_qmss_driver = {
+	.probe	= knav_queue_probe,
+	.remove	= knav_queue_remove,
+	.driver = {
+		.name	= "keystone-navigator-qmss",
+		.owner	= THIS_MODULE,
+		.of_match_table = keystone_qmss_of_match,
+	},
+};
+module_platform_driver(keystone_qmss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QMSS driver for Keystone SoCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
new file mode 100644
index 000000000000..bf5ee9c85330
--- /dev/null
+++ b/drivers/soc/versatile/Kconfig
@@ -0,0 +1,10 @@
+#
+# ARM Versatile SoC drivers
+#
+config SOC_REALVIEW
+	bool "SoC bus device for the ARM RealView platforms"
+	depends on ARCH_REALVIEW
+	select SOC_BUS
+	help
+	  Include support for the SoC bus on the ARM RealView platforms
+	  providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/versatile/Makefile b/drivers/soc/versatile/Makefile
new file mode 100644
index 000000000000..ad547435648e
--- /dev/null
+++ b/drivers/soc/versatile/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SOC_REALVIEW)	+= soc-realview.o
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
new file mode 100644
index 000000000000..cea8ea3491d2
--- /dev/null
+++ b/drivers/soc/versatile/soc-realview.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
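 *
 * [Illustrative note, not part of the original patch: this driver
 * registers a device on the SoC bus, so after a successful probe the
 * decoded system ID is readable from sysfs under the standard soc_device
 * path, roughly:
 *
 *	$ cat /sys/devices/soc0/board
 *	HBI-0147
 *	$ cat /sys/devices/soc0/fpga
 *	Multi-layer AXI
 *
 * The soc0 index and the sample values shown are assumptions made for
 * illustration only. ]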
+ *
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+/* System ID in syscon */
+#define REALVIEW_SYS_ID_OFFSET 0x00
+
+static const struct of_device_id realview_soc_of_match[] = {
+	{ .compatible = "arm,realview-eb-soc", },
+	{ .compatible = "arm,realview-pb1176-soc", },
+	{ .compatible = "arm,realview-pb11mp-soc", },
+	{ .compatible = "arm,realview-pba8-soc", },
+	{ .compatible = "arm,realview-pbx-soc", },
+	{ /* sentinel */ },
+};
+
+static u32 realview_coreid;
+
+static const char *realview_board_str(u32 id)
+{
+	switch ((id >> 16) & 0xfff) {
+	case 0x0147:
+		return "HBI-0147";
+	default:
+		return "Unknown";
+	}
+}
+
+static const char *realview_arch_str(u32 id)
+{
+	switch ((id >> 8) & 0xf) {
+	case 0x05:
+		return "Multi-layer AXI";
+	default:
+		return "Unknown";
+	}
+}
+
+static ssize_t realview_get_manf(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	return sprintf(buf, "%02x\n", realview_coreid >> 24);
+}
+
+static struct device_attribute realview_manf_attr =
+	__ATTR(manufacturer, S_IRUGO, realview_get_manf, NULL);
+
+static ssize_t realview_get_board(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%s\n", realview_board_str(realview_coreid));
+}
+
+static struct device_attribute realview_board_attr =
+	__ATTR(board, S_IRUGO, realview_get_board, NULL);
+
+static ssize_t realview_get_arch(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
+}
+
+static struct device_attribute realview_arch_attr =
+	__ATTR(fpga, S_IRUGO, realview_get_arch, NULL);
+
+static ssize_t realview_get_build(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
+}
+
+static struct device_attribute realview_build_attr =
+	__ATTR(build, S_IRUGO, realview_get_build, NULL);
+
+static int realview_soc_probe(struct platform_device *pdev)
+{
+	struct regmap *syscon_regmap;
+	struct soc_device *soc_dev;
+	struct soc_device_attribute *soc_dev_attr;
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	syscon_regmap = syscon_regmap_lookup_by_phandle(np, "regmap");
+	if (IS_ERR(syscon_regmap))
+		return PTR_ERR(syscon_regmap);
+
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr)
+		return -ENOMEM;
+
+	ret = of_property_read_string(np, "compatible",
+				      &soc_dev_attr->soc_id);
+	if (ret) {
+		kfree(soc_dev_attr);
+		return -EINVAL;
+	}
+
+	soc_dev_attr->machine = "RealView";
+	soc_dev_attr->family = "Versatile";
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR(soc_dev)) {
+		kfree(soc_dev_attr);
+		return -ENODEV;
+	}
+	ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
+			  &realview_coreid);
+	if (ret) {
+		soc_device_unregister(soc_dev);
+		kfree(soc_dev_attr);
+		return -ENODEV;
+	}
+
+	device_create_file(soc_device_to_device(soc_dev), &realview_manf_attr);
+	device_create_file(soc_device_to_device(soc_dev), &realview_board_attr);
+	device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
+	device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
+
+	dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x\n",
+		 realview_coreid);
+	/* FIXME: add attributes for SoC to sysfs */
+	return 0;
+}
+
+static struct platform_driver realview_soc_driver = {
+	.probe = realview_soc_probe,
+	.driver = {
+		.name = "realview-soc",
+		.of_match_table = realview_soc_of_match,
+	
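/* [Illustrative note, not part of this patch: module_platform_driver()
 * below generates the usual module init/exit boilerplate, roughly
 * equivalent to:
 *
 *	static int __init realview_soc_driver_init(void)
 *	{
 *		return platform_driver_register(&realview_soc_driver);
 *	}
 *	module_init(realview_soc_driver_init);
 *
 *	static void __exit realview_soc_driver_exit(void)
 *	{
 *		platform_driver_unregister(&realview_soc_driver);
 *	}
 *	module_exit(realview_soc_driver_exit);
 * ]
 */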
},
+};
+module_platform_driver(realview_soc_driver);