49 files changed, 3609 insertions, 721 deletions
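The series below adds a hardware spinlock framework whose vendor backends implement just three callbacks (see struct hwspinlock_ops in Documentation/hwspinlock.txt in the first hunk). As a rough, illustrative sketch only, not part of this series, a backend for a hypothetical device with one memory-mapped register per lock, where reading 0 means the lock was just acquired and writing 0 releases it, might look like this (the demo_* names and the register semantics are assumptions):

	#include <linux/kernel.h>
	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/hwspinlock.h>

	struct demo_hwspinlock {
		struct hwspinlock lock;	/* registered with hwspin_lock_register() */
		void __iomem *addr;	/* this lock's register (assumed layout) */
	};

	static int demo_hwspinlock_trylock(struct hwspinlock *lock)
	{
		struct demo_hwspinlock *demo =
			container_of(lock, struct demo_hwspinlock, lock);

		/* ->trylock() must return 1 on success, 0 on failure */
		return readl(demo->addr) == 0;
	}

	static void demo_hwspinlock_unlock(struct hwspinlock *lock)
	{
		struct demo_hwspinlock *demo =
			container_of(lock, struct demo_hwspinlock, lock);

		writel(0, demo->addr);
	}

	/* optional: back off between two successive ->trylock() attempts */
	static void demo_hwspinlock_relax(struct hwspinlock *lock)
	{
		ndelay(50);
	}

	static const struct hwspinlock_ops demo_hwspinlock_ops = {
		.trylock	= demo_hwspinlock_trylock,
		.unlock		= demo_hwspinlock_unlock,
		.relax		= demo_hwspinlock_relax,
	};

Probe code for such a backend would then assign the dev, ops, id and owner members of each embedded struct hwspinlock and pass it to hwspin_lock_register(), as described in section 5 of the new document.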
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt new file mode 100644 index 000000000000..7dcd1a4e726c --- /dev/null +++ b/Documentation/hwspinlock.txt @@ -0,0 +1,293 @@ +Hardware Spinlock Framework + +1. Introduction + +Hardware spinlock modules provide hardware assistance for synchronization +and mutual exclusion between heterogeneous processors and those not operating +under a single, shared operating system. + +For example, OMAP4 has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP, +each of which is running a different Operating System (the master, A9, +is usually running Linux and the slave processors, the M3 and the DSP, +are running some flavor of RTOS). + +A generic hwspinlock framework allows platform-independent drivers to use +the hwspinlock device in order to access data structures that are shared +between remote processors, that otherwise have no alternative mechanism +to accomplish synchronization and mutual exclusion operations. + +This is necessary, for example, for Inter-processor communications: +on OMAP4, cpu-intensive multimedia tasks are offloaded by the host to the +remote M3 and/or C64x+ slave processors (by an IPC subsystem called Syslink). + +To achieve fast message-based communications, a minimal kernel support +is needed to deliver messages arriving from a remote processor to the +appropriate user process. + +This communication is based on simple data structures that is shared between +the remote processors, and access to it is synchronized using the hwspinlock +module (remote processor directly places new messages in this shared data +structure). + +A common hwspinlock interface makes it possible to have generic, platform- +independent, drivers. + +2. User API + + struct hwspinlock *hwspin_lock_request(void); + - dynamically assign an hwspinlock and return its address, or NULL + in case an unused hwspinlock isn't available. Users of this + API will usually want to communicate the lock's id to the remote core + before it can be used to achieve synchronization. + Can be called from an atomic context (this function will not sleep) but + not from within interrupt context. + + struct hwspinlock *hwspin_lock_request_specific(unsigned int id); + - assign a specific hwspinlock id and return its address, or NULL + if that hwspinlock is already in use. Usually board code will + be calling this function in order to reserve specific hwspinlock + ids for predefined purposes. + Can be called from an atomic context (this function will not sleep) but + not from within interrupt context. + + int hwspin_lock_free(struct hwspinlock *hwlock); + - free a previously-assigned hwspinlock; returns 0 on success, or an + appropriate error code on failure (e.g. -EINVAL if the hwspinlock + is already free). + Can be called from an atomic context (this function will not sleep) but + not from within interrupt context. + + int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout); + - lock a previously-assigned hwspinlock with a timeout limit (specified in + msecs). If the hwspinlock is already taken, the function will busy loop + waiting for it to be released, but give up when the timeout elapses. + Upon a successful return from this function, preemption is disabled so + the caller must not sleep, and is advised to release the hwspinlock as + soon as possible, in order to minimize remote cores polling on the + hardware interconnect. 
+ Returns 0 when successful and an appropriate error code otherwise (most + notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). + The function will never sleep. + + int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int timeout); + - lock a previously-assigned hwspinlock with a timeout limit (specified in + msecs). If the hwspinlock is already taken, the function will busy loop + waiting for it to be released, but give up when the timeout elapses. + Upon a successful return from this function, preemption and the local + interrupts are disabled, so the caller must not sleep, and is advised to + release the hwspinlock as soon as possible. + Returns 0 when successful and an appropriate error code otherwise (most + notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). + The function will never sleep. + + int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to, + unsigned long *flags); + - lock a previously-assigned hwspinlock with a timeout limit (specified in + msecs). If the hwspinlock is already taken, the function will busy loop + waiting for it to be released, but give up when the timeout elapses. + Upon a successful return from this function, preemption is disabled, + local interrupts are disabled and their previous state is saved at the + given flags placeholder. The caller must not sleep, and is advised to + release the hwspinlock as soon as possible. + Returns 0 when successful and an appropriate error code otherwise (most + notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). + The function will never sleep. + + int hwspin_trylock(struct hwspinlock *hwlock); + - attempt to lock a previously-assigned hwspinlock, but immediately fail if + it is already taken. + Upon a successful return from this function, preemption is disabled so + caller must not sleep, and is advised to release the hwspinlock as soon as + possible, in order to minimize remote cores polling on the hardware + interconnect. + Returns 0 on success and an appropriate error code otherwise (most + notably -EBUSY if the hwspinlock was already taken). + The function will never sleep. + + int hwspin_trylock_irq(struct hwspinlock *hwlock); + - attempt to lock a previously-assigned hwspinlock, but immediately fail if + it is already taken. + Upon a successful return from this function, preemption and the local + interrupts are disabled so caller must not sleep, and is advised to + release the hwspinlock as soon as possible. + Returns 0 on success and an appropriate error code otherwise (most + notably -EBUSY if the hwspinlock was already taken). + The function will never sleep. + + int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags); + - attempt to lock a previously-assigned hwspinlock, but immediately fail if + it is already taken. + Upon a successful return from this function, preemption is disabled, + the local interrupts are disabled and their previous state is saved + at the given flags placeholder. The caller must not sleep, and is advised + to release the hwspinlock as soon as possible. + Returns 0 on success and an appropriate error code otherwise (most + notably -EBUSY if the hwspinlock was already taken). + The function will never sleep. + + void hwspin_unlock(struct hwspinlock *hwlock); + - unlock a previously-locked hwspinlock. Always succeed, and can be called + from any context (the function never sleeps). 
Note: code should _never_ + unlock an hwspinlock which is already unlocked (there is no protection + against this). + + void hwspin_unlock_irq(struct hwspinlock *hwlock); + - unlock a previously-locked hwspinlock and enable local interrupts. + The caller should _never_ unlock an hwspinlock which is already unlocked. + Doing so is considered a bug (there is no protection against this). + Upon a successful return from this function, preemption and local + interrupts are enabled. This function will never sleep. + + void + hwspin_unlock_irqrestore(struct hwspinlock *hwlock, unsigned long *flags); + - unlock a previously-locked hwspinlock. + The caller should _never_ unlock an hwspinlock which is already unlocked. + Doing so is considered a bug (there is no protection against this). + Upon a successful return from this function, preemption is reenabled, + and the state of the local interrupts is restored to the state saved at + the given flags. This function will never sleep. + + int hwspin_lock_get_id(struct hwspinlock *hwlock); + - retrieve id number of a given hwspinlock. This is needed when an + hwspinlock is dynamically assigned: before it can be used to achieve + mutual exclusion with a remote cpu, the id number should be communicated + to the remote task with which we want to synchronize. + Returns the hwspinlock id number, or -EINVAL if hwlock is null. + +3. Typical usage + +#include <linux/hwspinlock.h> +#include <linux/err.h> + +int hwspinlock_example1(void) +{ + struct hwspinlock *hwlock; + int ret; + + /* dynamically assign a hwspinlock */ + hwlock = hwspin_lock_request(); + if (!hwlock) + ... + + id = hwspin_lock_get_id(hwlock); + /* probably need to communicate id to a remote processor now */ + + /* take the lock, spin for 1 sec if it's already taken */ + ret = hwspin_lock_timeout(hwlock, 1000); + if (ret) + ... + + /* + * we took the lock, do our thing now, but do NOT sleep + */ + + /* release the lock */ + hwspin_unlock(hwlock); + + /* free the lock */ + ret = hwspin_lock_free(hwlock); + if (ret) + ... + + return ret; +} + +int hwspinlock_example2(void) +{ + struct hwspinlock *hwlock; + int ret; + + /* + * assign a specific hwspinlock id - this should be called early + * by board init code. + */ + hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID); + if (!hwlock) + ... + + /* try to take it, but don't spin on it */ + ret = hwspin_trylock(hwlock); + if (!ret) { + pr_info("lock is already taken\n"); + return -EBUSY; + } + + /* + * we took the lock, do our thing now, but do NOT sleep + */ + + /* release the lock */ + hwspin_unlock(hwlock); + + /* free the lock */ + ret = hwspin_lock_free(hwlock); + if (ret) + ... + + return ret; +} + + +4. API for implementors + + int hwspin_lock_register(struct hwspinlock *hwlock); + - to be called from the underlying platform-specific implementation, in + order to register a new hwspinlock instance. Can be called from an atomic + context (this function will not sleep) but not from within interrupt + context. Returns 0 on success, or appropriate error code on failure. + + struct hwspinlock *hwspin_lock_unregister(unsigned int id); + - to be called from the underlying vendor-specific implementation, in order + to unregister an existing (and unused) hwspinlock instance. + Can be called from an atomic context (will not sleep) but not from + within interrupt context. + Returns the address of hwspinlock on success, or NULL on error (e.g. + if the hwspinlock is sill in use). + +5. 
struct hwspinlock + +This struct represents an hwspinlock instance. It is registered by the +underlying hwspinlock implementation using the hwspin_lock_register() API. + +/** + * struct hwspinlock - vendor-specific hwspinlock implementation + * + * @dev: underlying device, will be used with runtime PM api + * @ops: vendor-specific hwspinlock handlers + * @id: a global, unique, system-wide, index of the lock. + * @lock: initialized and used by hwspinlock core + * @owner: underlying implementation module, used to maintain module ref count + */ +struct hwspinlock { + struct device *dev; + const struct hwspinlock_ops *ops; + int id; + spinlock_t lock; + struct module *owner; +}; + +The underlying implementation is responsible to assign the dev, ops, id and +owner members. The lock member, OTOH, is initialized and used by the hwspinlock +core. + +6. Implementation callbacks + +There are three possible callbacks defined in 'struct hwspinlock_ops': + +struct hwspinlock_ops { + int (*trylock)(struct hwspinlock *lock); + void (*unlock)(struct hwspinlock *lock); + void (*relax)(struct hwspinlock *lock); +}; + +The first two callbacks are mandatory: + +The ->trylock() callback should make a single attempt to take the lock, and +return 0 on failure and 1 on success. This callback may _not_ sleep. + +The ->unlock() callback releases the lock. It always succeed, and it, too, +may _not_ sleep. + +The ->relax() callback is optional. It is called by hwspinlock core while +spinning on a lock, and can be used by the underlying implementation to force +a delay between two successive invocations of ->trylock(). It may _not_ sleep. diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 9b4e78fe3d1c..b9d8a7b2a862 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -310,6 +310,7 @@ config MACH_OMAP_4430SDP depends on ARCH_OMAP4 select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBS + select REGULATOR_FIXED_VOLTAGE config MACH_OMAP4_PANDA bool "OMAP4 Panda Board" @@ -317,6 +318,7 @@ config MACH_OMAP4_PANDA depends on ARCH_OMAP4 select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBS + select REGULATOR_FIXED_VOLTAGE config OMAP3_EMU bool "OMAP3 debugging peripherals" diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index a9e3974d015f..ee72a9787bf1 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -218,7 +218,8 @@ obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \ hsmmc.o \ omap_phy_internal.o -obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o +obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o \ + omap_phy_internal.o \ obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o @@ -243,3 +244,4 @@ obj-y += $(smc91x-m) $(smc91x-y) smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o obj-y += $(smsc911x-m) $(smsc911x-y) +obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index ec74c0f2051c..cc42d474c443 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c @@ -22,6 +22,7 @@ #include <linux/mmc/host.h> #include <linux/delay.h> #include <linux/i2c/twl.h> +#include <linux/regulator/machine.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> @@ -147,6 +148,25 @@ static void __init omap_2430sdp_init_early(void) omap2_init_common_devices(NULL, NULL); } +static struct regulator_consumer_supply sdp2430_vmmc1_supplies[] = { + REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"), +}; + +/* VMMC1 for OMAP VDD_MMC1 (i/o) and 
MMC1 card */ +static struct regulator_init_data sdp2430_vmmc1 = { + .constraints = { + .min_uV = 1850000, + .max_uV = 3150000, + .valid_modes_mask = REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE + | REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(sdp2430_vmmc1_supplies), + .consumer_supplies = &sdp2430_vmmc1_supplies[0], +}; + static struct twl4030_gpio_platform_data sdp2430_gpio_data = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = TWL4030_GPIO_IRQ_BASE, @@ -159,6 +179,7 @@ static struct twl4030_platform_data sdp2430_twldata = { /* platform_data for children goes here */ .gpio = &sdp2430_gpio_data, + .vmmc1 = &sdp2430_vmmc1, }; static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = { diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index 31085883199e..76a260f7c00e 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -315,11 +315,6 @@ static struct platform_device sdp3430_dss_device = { }, }; -static struct regulator_consumer_supply sdp3430_vdda_dac_supply = { - .supply = "vdda_dac", - .dev = &sdp3430_dss_device.dev, -}; - static struct platform_device *sdp3430_devices[] __initdata = { &sdp3430_dss_device, }; @@ -369,18 +364,6 @@ static struct omap2_hsmmc_info mmc[] = { {} /* Terminator */ }; -static struct regulator_consumer_supply sdp3430_vmmc1_supply = { - .supply = "vmmc", -}; - -static struct regulator_consumer_supply sdp3430_vsim_supply = { - .supply = "vmmc_aux", -}; - -static struct regulator_consumer_supply sdp3430_vmmc2_supply = { - .supply = "vmmc", -}; - static int sdp3430_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { @@ -391,13 +374,6 @@ static int sdp3430_twl_gpio_setup(struct device *dev, mmc[1].gpio_cd = gpio + 1; omap2_hsmmc_init(mmc); - /* link regulators to MMC adapters ... we "know" the - * regulators will be set up only *after* we return. - */ - sdp3430_vmmc1_supply.dev = mmc[0].dev; - sdp3430_vsim_supply.dev = mmc[0].dev; - sdp3430_vmmc2_supply.dev = mmc[1].dev; - /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ gpio_request(gpio + 7, "sub_lcd_en_bkl"); gpio_direction_output(gpio + 7, 0); @@ -426,6 +402,34 @@ static struct twl4030_madc_platform_data sdp3430_madc_data = { .irq_line = 1, }; +/* regulator consumer mappings */ + +/* ads7846 on SPI */ +static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = { + REGULATOR_SUPPLY("vcc", "spi1.0"), +}; + +static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = { + REGULATOR_SUPPLY("vdda_dac", "omapdss"), +}; + +/* VPLL2 for digital video outputs */ +static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = { + REGULATOR_SUPPLY("vdds_dsi", "omapdss"), +}; + +static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = { + REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"), +}; + +static struct regulator_consumer_supply sdp3430_vsim_supplies[] = { + REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.0"), +}; + +static struct regulator_consumer_supply sdp3430_vmmc2_supplies[] = { + REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"), +}; + /* * Apply all the fixed voltages since most versions of U-Boot * don't bother with that initialization. 
@@ -468,6 +472,8 @@ static struct regulator_init_data sdp3430_vaux3 = { .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, + .num_consumer_supplies = ARRAY_SIZE(sdp3430_vaux3_supplies), + .consumer_supplies = sdp3430_vaux3_supplies, }; /* VAUX4 for OMAP VDD_CSI2 (camera) */ @@ -494,8 +500,8 @@ static struct regulator_init_data sdp3430_vmmc1 = { | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, - .num_consumer_supplies = 1, - .consumer_supplies = &sdp3430_vmmc1_supply, + .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc1_supplies), + .consumer_supplies = sdp3430_vmmc1_supplies, }; /* VMMC2 for MMC2 card */ @@ -509,8 +515,8 @@ static struct regulator_init_data sdp3430_vmmc2 = { .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, - .num_consumer_supplies = 1, - .consumer_supplies = &sdp3430_vmmc2_supply, + .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc2_supplies), + .consumer_supplies = sdp3430_vmmc2_supplies, }; /* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */ @@ -524,8 +530,8 @@ static struct regulator_init_data sdp3430_vsim = { | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, - .num_consumer_supplies = 1, - .consumer_supplies = &sdp3430_vsim_supply, + .num_consumer_supplies = ARRAY_SIZE(sdp3430_vsim_supplies), + .consumer_supplies = sdp3430_vsim_supplies, }; /* VDAC for DSS driving S-Video */ @@ -539,16 +545,8 @@ static struct regulator_init_data sdp3430_vdac = { .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, - .num_consumer_supplies = 1, - .consumer_supplies = &sdp3430_vdda_dac_supply, -}; - -/* VPLL2 for digital video outputs */ -static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = { - { - .supply = "vdds_dsi", - .dev = &sdp3430_dss_device.dev, - } + .num_consumer_supplies = ARRAY_SIZE(sdp3430_vdda_dac_supplies), + .consumer_supplies = sdp3430_vdda_dac_supplies, }; static struct regulator_init_data sdp3430_vpll2 = { @@ -812,7 +810,7 @@ static void __init omap_3430sdp_init(void) omap_serial_init(); usb_musb_init(&musb_board_data); board_smc91x_init(); - board_flash_init(sdp_flash_partitions, chip_sel_3430); + board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); sdp3430_display_init(); enable_board_wakeup_source(); usb_ehci_init(&ehci_pdata); diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c index 16538757291a..8d1c4358ecf9 100644 --- a/arch/arm/mach-omap2/board-3630sdp.c +++ b/arch/arm/mach-omap2/board-3630sdp.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <linux/input.h> #include <linux/gpio.h> +#include <linux/mtd/nand.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -208,7 +209,7 @@ static void __init omap_sdp_init(void) zoom_peripherals_init(); zoom_display_init(); board_smc91x_init(); - board_flash_init(sdp_flash_partitions, chip_sel_sdp); + board_flash_init(sdp_flash_partitions, chip_sel_sdp, NAND_BUSWIDTH_16); enable_board_wakeup_source(); usb_ehci_init(&ehci_pdata); } diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index dcc8b27e3033..1a943be822c3 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -45,7 +45,6 @@ #define ETH_KS8851_IRQ 34 #define ETH_KS8851_POWER_ON 48 #define ETH_KS8851_QUART 138 -#define OMAP4SDP_MDM_PWR_EN_GPIO 157 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_ENABLE_GPIO 188 @@ -335,16 +334,6 @@ static void __init omap_4430sdp_init_early(void) #endif } -static const struct ehci_hcd_omap_platform_data 
ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .phy_reset = false, - .reset_gpio_port[0] = -EINVAL, - .reset_gpio_port[1] = -EINVAL, - .reset_gpio_port[2] = -EINVAL, -}; - static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_UTMI, .mode = MUSB_OTG, @@ -518,7 +507,6 @@ static struct regulator_init_data sdp4430_vana = { .constraints = { .min_uV = 2100000, .max_uV = 2100000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -530,7 +518,6 @@ static struct regulator_init_data sdp4430_vcxio = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -542,7 +529,6 @@ static struct regulator_init_data sdp4430_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -660,14 +646,6 @@ static void __init omap_4430sdp_init(void) omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); - /* Power on the ULPI PHY */ - status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3"); - if (status) - pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__); - else - gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1); - - usb_ehci_init(&ehci_pdata); usb_musb_init(&musb_board_data); status = omap_ethernet_init(); diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c index d0d0f5528132..8532d6e0d53a 100644 --- a/arch/arm/mach-omap2/board-am3517evm.c +++ b/arch/arm/mach-omap2/board-am3517evm.c @@ -408,6 +408,10 @@ static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_ULPI, .mode = MUSB_OTG, .power = 500, + .set_phy_power = am35x_musb_phy_power, + .clear_irq = am35x_musb_clear_irq, + .set_mode = am35x_musb_set_mode, + .reset = am35x_musb_reset, }; static __init void am3517_evm_musb_init(void) diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c index fd38c05bb47f..c32c06828f08 100644 --- a/arch/arm/mach-omap2/board-flash.c +++ b/arch/arm/mach-omap2/board-flash.c @@ -1,5 +1,5 @@ /* - * board-sdp-flash.c + * board-flash.c * Modified from mach-omap2/board-3430sdp-flash.c * * Copyright (C) 2009 Nokia Corporation @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/io.h> +#include <plat/irqs.h> #include <plat/gpmc.h> #include <plat/nand.h> @@ -73,11 +74,11 @@ __init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs) + FLASH_SIZE_SDPV1 - 1; } if (err < 0) { - printk(KERN_ERR "NOR: Can't request GPMC CS\n"); + pr_err("NOR: Can't request GPMC CS\n"); return; } if (platform_device_register(&board_nor_device) < 0) - printk(KERN_ERR "Unable to register NOR device\n"); + pr_err("Unable to register NOR device\n"); } #if defined(CONFIG_MTD_ONENAND_OMAP2) || \ @@ -139,12 +140,16 @@ static struct omap_nand_platform_data board_nand_data = { }; void -__init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs) +__init board_nand_init(struct mtd_partition *nand_parts, + u8 nr_parts, u8 cs, int nand_type) { board_nand_data.cs = cs; board_nand_data.parts = nand_parts; - board_nand_data.nr_parts = nr_parts; + board_nand_data.nr_parts = nr_parts; + 
board_nand_data.devsize = nand_type; + board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT; + board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs; gpmc_nand_init(&board_nand_data); } #else @@ -189,12 +194,12 @@ unmap: } /** - * sdp3430_flash_init - Identify devices connected to GPMC and register. + * board_flash_init - Identify devices connected to GPMC and register. * * @return - void. */ void board_flash_init(struct flash_partitions partition_info[], - char chip_sel_board[][GPMC_CS_NUM]) + char chip_sel_board[][GPMC_CS_NUM], int nand_type) { u8 cs = 0; u8 norcs = GPMC_CS_NUM + 1; @@ -208,7 +213,7 @@ void board_flash_init(struct flash_partitions partition_info[], */ idx = get_gpmc0_type(); if (idx >= MAX_SUPPORTED_GPMC_CONFIG) { - printk(KERN_ERR "%s: Invalid chip select: %d\n", __func__, cs); + pr_err("%s: Invalid chip select: %d\n", __func__, cs); return; } config_sel = (unsigned char *)(chip_sel_board[idx]); @@ -232,23 +237,20 @@ void board_flash_init(struct flash_partitions partition_info[], } if (norcs > GPMC_CS_NUM) - printk(KERN_INFO "NOR: Unable to find configuration " - "in GPMC\n"); + pr_err("NOR: Unable to find configuration in GPMC\n"); else board_nor_init(partition_info[0].parts, partition_info[0].nr_parts, norcs); if (onenandcs > GPMC_CS_NUM) - printk(KERN_INFO "OneNAND: Unable to find configuration " - "in GPMC\n"); + pr_err("OneNAND: Unable to find configuration in GPMC\n"); else board_onenand_init(partition_info[1].parts, partition_info[1].nr_parts, onenandcs); if (nandcs > GPMC_CS_NUM) - printk(KERN_INFO "NAND: Unable to find configuration " - "in GPMC\n"); + pr_err("NAND: Unable to find configuration in GPMC\n"); else board_nand_init(partition_info[2].parts, - partition_info[2].nr_parts, nandcs); + partition_info[2].nr_parts, nandcs, nand_type); } diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h index 69befe00dd2f..c240a3f8d163 100644 --- a/arch/arm/mach-omap2/board-flash.h +++ b/arch/arm/mach-omap2/board-flash.h @@ -25,6 +25,6 @@ struct flash_partitions { }; extern void board_flash_init(struct flash_partitions [], - char chip_sel[][GPMC_CS_NUM]); + char chip_sel[][GPMC_CS_NUM], int nand_type); extern void board_nand_init(struct mtd_partition *nand_parts, - u8 nr_parts, u8 cs); + u8 nr_parts, u8 cs, int nand_type); diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index d8eb2cb7cbc7..a3fae5697a72 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -433,7 +433,7 @@ static void __init omap_ldp_init(void) omap_serial_init(); usb_musb_init(&musb_board_data); board_nand_init(ldp_nand_partitions, - ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS); + ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0); omap2_hsmmc_init(mmc); /* link regulators to MMC adapters */ diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index c2a0fca4aa53..d4a115712290 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -30,6 +30,8 @@ #include <linux/usb/otg.h> #include <linux/smsc911x.h> +#include <linux/wl12xx.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/mmc/host.h> @@ -58,6 +60,13 @@ #define OMAP3EVM_ETHR_ID_REV 0x50 #define OMAP3EVM_ETHR_GPIO_IRQ 176 #define OMAP3EVM_SMSC911X_CS 5 +/* + * Eth Reset signal + * 64 = Generation 1 (<=RevD) + * 7 = Generation 2 (>=RevE) + */ +#define OMAP3EVM_GEN1_ETHR_GPIO_RST 64 +#define OMAP3EVM_GEN2_ETHR_GPIO_RST 7 static u8 omap3_evm_version; 
@@ -124,10 +133,15 @@ static struct platform_device omap3evm_smsc911x_device = { static inline void __init omap3evm_init_smsc911x(void) { - int eth_cs; + int eth_cs, eth_rst; struct clk *l3ck; unsigned int rate; + if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1) + eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST; + else + eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST; + eth_cs = OMAP3EVM_SMSC911X_CS; l3ck = clk_get(NULL, "l3_ck"); @@ -136,6 +150,27 @@ static inline void __init omap3evm_init_smsc911x(void) else rate = clk_get_rate(l3ck); + /* Configure ethernet controller reset gpio */ + if (cpu_is_omap3430()) { + if (gpio_request(eth_rst, "SMSC911x gpio") < 0) { + pr_err(KERN_ERR "Failed to request %d for smsc911x\n", + eth_rst); + return; + } + + if (gpio_direction_output(eth_rst, 1) < 0) { + pr_err(KERN_ERR "Failed to set direction of %d for" \ + " smsc911x\n", eth_rst); + return; + } + /* reset pulse to ethernet controller*/ + usleep_range(150, 220); + gpio_set_value(eth_rst, 0); + usleep_range(150, 220); + gpio_set_value(eth_rst, 1); + usleep_range(1, 2); + } + if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) { printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", OMAP3EVM_ETHR_GPIO_IRQ); @@ -235,9 +270,9 @@ static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev) gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 0); if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) - gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); + gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); else - gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); + gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); lcd_enabled = 1; return 0; @@ -248,9 +283,9 @@ static void omap3_evm_disable_lcd(struct omap_dss_device *dssdev) gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 1); if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) - gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); + gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); else - gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); + gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); lcd_enabled = 0; } @@ -289,7 +324,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev) return -EINVAL; } - gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 1); + gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 1); dvi_enabled = 1; return 0; @@ -297,7 +332,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev) static void omap3_evm_disable_dvi(struct omap_dss_device *dssdev) { - gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 0); + gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 0); dvi_enabled = 0; } @@ -381,6 +416,16 @@ static struct omap2_hsmmc_info mmc[] = { .gpio_cd = -EINVAL, .gpio_wp = 63, }, +#ifdef CONFIG_WL12XX_PLATFORM_DATA + { + .name = "wl1271", + .mmc = 2, + .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, + .gpio_wp = -EINVAL, + .gpio_cd = -EINVAL, + .nonremovable = true, + }, +#endif {} /* Terminator */ }; @@ -411,6 +456,8 @@ static struct platform_device leds_gpio = { static int omap3evm_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int r; + /* gpio + 0 is "mmc0_cd" (input/IRQ) */ omap_mux_init_gpio(63, OMAP_PIN_INPUT); mmc[0].gpio_cd = gpio + 0; @@ -426,8 +473,12 @@ static int omap3evm_twl_gpio_setup(struct device *dev, */ /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ - gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); - gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); + r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); + if (!r) + r = 
gpio_direction_output(gpio + TWL4030_GPIO_MAX, + (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0); + if (r) + printk(KERN_ERR "failed to get/set lcd_bkl gpio\n"); /* gpio + 7 == DVI Enable */ gpio_request(gpio + 7, "EN_DVI"); @@ -538,6 +589,69 @@ static struct regulator_init_data omap3_evm_vpll2 = { .consumer_supplies = &omap3_evm_vpll2_supply, }; +/* ads7846 on SPI */ +static struct regulator_consumer_supply omap3evm_vio_supply = + REGULATOR_SUPPLY("vcc", "spi1.0"); + +/* VIO for ads7846 */ +static struct regulator_init_data omap3evm_vio = { + .constraints = { + .min_uV = 1800000, + .max_uV = 1800000, + .apply_uV = true, + .valid_modes_mask = REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .valid_ops_mask = REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &omap3evm_vio_supply, +}; + +#ifdef CONFIG_WL12XX_PLATFORM_DATA + +#define OMAP3EVM_WLAN_PMENA_GPIO (150) +#define OMAP3EVM_WLAN_IRQ_GPIO (149) + +static struct regulator_consumer_supply omap3evm_vmmc2_supply = { + .supply = "vmmc", + .dev_name = "mmci-omap-hs.1", +}; + +/* VMMC2 for driving the WL12xx module */ +static struct regulator_init_data omap3evm_vmmc2 = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &omap3evm_vmmc2_supply, +}; + +static struct fixed_voltage_config omap3evm_vwlan = { + .supply_name = "vwl1271", + .microvolts = 1800000, /* 1.80V */ + .gpio = OMAP3EVM_WLAN_PMENA_GPIO, + .startup_delay = 70000, /* 70ms */ + .enable_high = 1, + .enabled_at_boot = 0, + .init_data = &omap3evm_vmmc2, +}; + +static struct platform_device omap3evm_vwlan_device = { + .name = "reg-fixed-voltage", + .id = 1, + .dev = { + .platform_data = &omap3evm_vwlan, + }, +}; + +struct wl12xx_platform_data omap3evm_wlan_data __initdata = { + .irq = OMAP_GPIO_IRQ(OMAP3EVM_WLAN_IRQ_GPIO), + /* ref clock is 38.4 MHz */ + .board_ref_clock = 2, +}; +#endif + static struct twl4030_platform_data omap3evm_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = TWL4030_IRQ_END, @@ -550,6 +664,7 @@ static struct twl4030_platform_data omap3evm_twldata = { .codec = &omap3evm_codec_data, .vdac = &omap3_evm_vdac, .vpll2 = &omap3_evm_vpll2, + .vio = &omap3evm_vio, }; static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = { @@ -651,14 +766,61 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { }; #ifdef CONFIG_OMAP_MUX -static struct omap_board_mux board_mux[] __initdata = { +static struct omap_board_mux omap35x_board_mux[] __initdata = { + OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP | + OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW | + OMAP_PIN_OFF_WAKEUPENABLE), + OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | + OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW | + OMAP_PIN_OFF_WAKEUPENABLE), + OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | + OMAP_PIN_OFF_NONE), + OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | + OMAP_PIN_OFF_NONE), +#ifdef CONFIG_WL12XX_PLATFORM_DATA + /* WLAN IRQ - GPIO 149 */ + OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP), + + /* WLAN POWER ENABLE - GPIO 150 */ + OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), + + /* MMC2 SDIO pin muxes for WL12xx */ + OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | 
OMAP_PIN_INPUT_PULLUP), + OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), +#endif + { .reg_offset = OMAP_MUX_TERMINATOR }, +}; + +static struct omap_board_mux omap36x_board_mux[] __initdata = { OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW | OMAP_PIN_OFF_WAKEUPENABLE), OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | - OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW), + OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW | + OMAP_PIN_OFF_WAKEUPENABLE), + /* AM/DM37x EVM: DSS data bus muxed with sys_boot */ + OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT0, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT1, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT3, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), + { .reg_offset = OMAP_MUX_TERMINATOR }, }; +#else +#define omap35x_board_mux NULL +#define omap36x_board_mux NULL #endif static struct omap_musb_board_data musb_board_data = { @@ -670,7 +832,11 @@ static struct omap_musb_board_data musb_board_data = { static void __init omap3_evm_init(void) { omap3_evm_get_revision(); - omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); + + if (cpu_is_omap3630()) + omap3_mux_init(omap36x_board_mux, OMAP_PACKAGE_CBB); + else + omap3_mux_init(omap35x_board_mux, OMAP_PACKAGE_CBB); omap3_evm_i2c_init(); @@ -714,6 +880,13 @@ static void __init omap3_evm_init(void) ads7846_dev_init(); omap3evm_init_smsc911x(); omap3_evm_display_init(); + +#ifdef CONFIG_WL12XX_PLATFORM_DATA + /* WL12xx WLAN Init */ + if (wl12xx_set_platform_data(&omap3evm_wlan_data)) + pr_err("error setting wl12xx data\n"); + platform_device_register(&omap3evm_vwlan_device); +#endif } MACHINE_START(OMAP3EVM, "OMAP3 EVM") diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index fca5b9e80c18..3dd241b95159 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -26,6 +26,8 @@ #include <linux/usb/otg.h> #include <linux/i2c/twl.h> #include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> +#include <linux/wl12xx.h> #include <mach/hardware.h> #include <mach/omap4-common.h> @@ -45,6 +47,8 @@ #define GPIO_HUB_POWER 1 #define GPIO_HUB_NRESET 62 +#define GPIO_WIFI_PMENA 43 +#define GPIO_WIFI_IRQ 53 static struct gpio_led gpio_leds[] = { { @@ -161,6 +165,15 @@ static struct omap2_hsmmc_info mmc[] = { .gpio_wp = -EINVAL, .gpio_cd = -EINVAL, }, + { + .name = "wl1271", + .mmc = 5, + .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, + .gpio_wp = -EINVAL, + .gpio_cd = -EINVAL, + .ocr_mask = MMC_VDD_165_195, + .nonremovable = true, + }, {} /* Terminator */ }; @@ -171,6 +184,43 @@ static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = { }, }; +static struct regulator_consumer_supply omap4_panda_vmmc5_supply = { + .supply = "vmmc", + .dev_name = "mmci-omap-hs.4", +}; + +static struct regulator_init_data panda_vmmc5 = { + 
.constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &omap4_panda_vmmc5_supply, +}; + +static struct fixed_voltage_config panda_vwlan = { + .supply_name = "vwl1271", + .microvolts = 1800000, /* 1.8V */ + .gpio = GPIO_WIFI_PMENA, + .startup_delay = 70000, /* 70msec */ + .enable_high = 1, + .enabled_at_boot = 0, + .init_data = &panda_vmmc5, +}; + +static struct platform_device omap_vwlan_device = { + .name = "reg-fixed-voltage", + .id = 1, + .dev = { + .platform_data = &panda_vwlan, + }, +}; + +struct wl12xx_platform_data omap_panda_wlan_data __initdata = { + .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ), + /* PANDA ref clock is 38.4 MHz */ + .board_ref_clock = 2, +}; + static int omap4_twl6030_hsmmc_late_init(struct device *dev) { int ret = 0; @@ -304,7 +354,6 @@ static struct regulator_init_data omap4_panda_vana = { .constraints = { .min_uV = 2100000, .max_uV = 2100000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -316,7 +365,6 @@ static struct regulator_init_data omap4_panda_vcxio = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -328,7 +376,6 @@ static struct regulator_init_data omap4_panda_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, - .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE @@ -390,6 +437,19 @@ static int __init omap4_panda_i2c_init(void) #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + /* WLAN IRQ - GPIO 53 */ + OMAP4_MUX(GPMC_NCS3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT), + /* WLAN POWER ENABLE - GPIO 43 */ + OMAP4_MUX(GPMC_A19, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT), + /* WLAN SDIO: MMC5 CMD */ + OMAP4_MUX(SDMMC5_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + /* WLAN SDIO: MMC5 CLK */ + OMAP4_MUX(SDMMC5_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + /* WLAN SDIO: MMC5 DAT[0-3] */ + OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), + OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #else @@ -404,8 +464,12 @@ static void __init omap4_panda_init(void) package = OMAP_PACKAGE_CBL; omap4_mux_init(board_mux, package); + if (wl12xx_set_platform_data(&omap_panda_wlan_data)) + pr_err("error setting wl12xx data\n"); + omap4_panda_i2c_init(); platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); + platform_device_register(&omap_vwlan_device); omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); omap4_ehci_init(); diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c index 85d4170f30ab..7e3f1595d77b 100644 --- a/arch/arm/mach-omap2/board-zoom.c +++ b/arch/arm/mach-omap2/board-zoom.c @@ -16,6 +16,7 @@ #include <linux/input.h> #include <linux/gpio.h> #include <linux/i2c/twl.h> +#include <linux/mtd/nand.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -124,8 +125,8 @@ static void __init omap_zoom_init(void) usb_ehci_init(&ehci_pdata); } - board_nand_init(zoom_nand_partitions, - ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS); + board_nand_init(zoom_nand_partitions, ARRAY_SIZE(zoom_nand_partitions), + ZOOM_NAND_CS, NAND_BUSWIDTH_16); 
zoom_debugboard_init(); zoom_peripherals_init(); zoom_display_init(); diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c index a781cd6795a4..e25364de028a 100644 --- a/arch/arm/mach-omap2/clkt_clksel.c +++ b/arch/arm/mach-omap2/clkt_clksel.c @@ -97,7 +97,7 @@ static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk, u32 *field_val) { const struct clksel *clks; - const struct clksel_rate *clkr, *max_clkr; + const struct clksel_rate *clkr, *max_clkr = NULL; u8 max_div = 0; clks = _get_clksel_by_parent(clk, src_clk); diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 8800486f9467..9ee876fd367a 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/slab.h> #include <mach/hardware.h> #include <mach/irqs.h> @@ -320,163 +321,55 @@ static inline void omap_init_audio(void) {} #include <plat/mcspi.h> -#define OMAP2_MCSPI1_BASE 0x48098000 -#define OMAP2_MCSPI2_BASE 0x4809a000 -#define OMAP2_MCSPI3_BASE 0x480b8000 -#define OMAP2_MCSPI4_BASE 0x480ba000 - -#define OMAP4_MCSPI1_BASE 0x48098100 -#define OMAP4_MCSPI2_BASE 0x4809a100 -#define OMAP4_MCSPI3_BASE 0x480b8100 -#define OMAP4_MCSPI4_BASE 0x480ba100 - -static struct omap2_mcspi_platform_config omap2_mcspi1_config = { - .num_cs = 4, -}; - -static struct resource omap2_mcspi1_resources[] = { - { - .start = OMAP2_MCSPI1_BASE, - .end = OMAP2_MCSPI1_BASE + 0xff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device omap2_mcspi1 = { - .name = "omap2_mcspi", - .id = 1, - .num_resources = ARRAY_SIZE(omap2_mcspi1_resources), - .resource = omap2_mcspi1_resources, - .dev = { - .platform_data = &omap2_mcspi1_config, - }, -}; - -static struct omap2_mcspi_platform_config omap2_mcspi2_config = { - .num_cs = 2, -}; - -static struct resource omap2_mcspi2_resources[] = { - { - .start = OMAP2_MCSPI2_BASE, - .end = OMAP2_MCSPI2_BASE + 0xff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device omap2_mcspi2 = { - .name = "omap2_mcspi", - .id = 2, - .num_resources = ARRAY_SIZE(omap2_mcspi2_resources), - .resource = omap2_mcspi2_resources, - .dev = { - .platform_data = &omap2_mcspi2_config, - }, -}; - -#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ - defined(CONFIG_ARCH_OMAP4) -static struct omap2_mcspi_platform_config omap2_mcspi3_config = { - .num_cs = 2, -}; - -static struct resource omap2_mcspi3_resources[] = { - { - .start = OMAP2_MCSPI3_BASE, - .end = OMAP2_MCSPI3_BASE + 0xff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device omap2_mcspi3 = { - .name = "omap2_mcspi", - .id = 3, - .num_resources = ARRAY_SIZE(omap2_mcspi3_resources), - .resource = omap2_mcspi3_resources, - .dev = { - .platform_data = &omap2_mcspi3_config, - }, -}; -#endif - -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) -static struct omap2_mcspi_platform_config omap2_mcspi4_config = { - .num_cs = 1, -}; - -static struct resource omap2_mcspi4_resources[] = { - { - .start = OMAP2_MCSPI4_BASE, - .end = OMAP2_MCSPI4_BASE + 0xff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device omap2_mcspi4 = { - .name = "omap2_mcspi", - .id = 4, - .num_resources = ARRAY_SIZE(omap2_mcspi4_resources), - .resource = omap2_mcspi4_resources, - .dev = { - .platform_data = &omap2_mcspi4_config, +struct omap_device_pm_latency omap_mcspi_latency[] = { + [0] = { + .deactivate_func = omap_device_idle_hwmods, + .activate_func = 
omap_device_enable_hwmods, + .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, }, }; -#endif -#ifdef CONFIG_ARCH_OMAP4 -static inline void omap4_mcspi_fixup(void) +static int omap_mcspi_init(struct omap_hwmod *oh, void *unused) { - omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE; - omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff; - omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE; - omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff; - omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE; - omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff; - omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE; - omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff; -} -#else -static inline void omap4_mcspi_fixup(void) -{ -} -#endif + struct omap_device *od; + char *name = "omap2_mcspi"; + struct omap2_mcspi_platform_config *pdata; + static int spi_num; + struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + pr_err("Memory allocation for McSPI device failed\n"); + return -ENOMEM; + } -#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ - defined(CONFIG_ARCH_OMAP4) -static inline void omap2_mcspi3_init(void) -{ - platform_device_register(&omap2_mcspi3); -} -#else -static inline void omap2_mcspi3_init(void) -{ -} -#endif + pdata->num_cs = mcspi_attrib->num_chipselect; + switch (oh->class->rev) { + case OMAP2_MCSPI_REV: + case OMAP3_MCSPI_REV: + pdata->regs_offset = 0; + break; + case OMAP4_MCSPI_REV: + pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET; + break; + default: + pr_err("Invalid McSPI Revision value\n"); + return -EINVAL; + } -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) -static inline void omap2_mcspi4_init(void) -{ - platform_device_register(&omap2_mcspi4); -} -#else -static inline void omap2_mcspi4_init(void) -{ + spi_num++; + od = omap_device_build(name, spi_num, oh, pdata, + sizeof(*pdata), omap_mcspi_latency, + ARRAY_SIZE(omap_mcspi_latency), 0); + WARN(IS_ERR(od), "Cant build omap_device for %s:%s\n", + name, oh->name); + kfree(pdata); + return 0; } -#endif static void omap_init_mcspi(void) { - if (cpu_is_omap44xx()) - omap4_mcspi_fixup(); - - platform_device_register(&omap2_mcspi1); - platform_device_register(&omap2_mcspi2); - - if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx()) - omap2_mcspi3_init(); - - if (cpu_is_omap343x() || cpu_is_omap44xx()) - omap2_mcspi4_init(); + omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL); } #else diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c index 2bb29c160702..c1791d08ae56 100644 --- a/arch/arm/mach-omap2/gpmc-nand.c +++ b/arch/arm/mach-omap2/gpmc-nand.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/mtd/nand.h> #include <asm/mach/flash.h> @@ -69,8 +70,10 @@ static int omap2_nand_gpmc_retime(void) t.wr_cycle = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->wr_cycle); /* Configure GPMC */ - gpmc_cs_configure(gpmc_nand_data->cs, - GPMC_CONFIG_DEV_SIZE, gpmc_nand_data->devsize); + if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16) + gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 1); + else + gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 0); gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND); err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t); diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c index 3a7d25fb00ef..d776ded9830d 
100644 --- a/arch/arm/mach-omap2/gpmc-onenand.c +++ b/arch/arm/mach-omap2/gpmc-onenand.c @@ -94,7 +94,7 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) } static void set_onenand_cfg(void __iomem *onenand_base, int latency, - int sync_read, int sync_write, int hf) + int sync_read, int sync_write, int hf, int vhf) { u32 reg; @@ -114,12 +114,57 @@ static void set_onenand_cfg(void __iomem *onenand_base, int latency, reg |= ONENAND_SYS_CFG1_HF; else reg &= ~ONENAND_SYS_CFG1_HF; + if (vhf) + reg |= ONENAND_SYS_CFG1_VHF; + else + reg &= ~ONENAND_SYS_CFG1_VHF; writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); } +static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg, + void __iomem *onenand_base, bool *clk_dep) +{ + u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID); + int freq = 0; + + if (cfg->get_freq) { + struct onenand_freq_info fi; + + fi.maf_id = readw(onenand_base + ONENAND_REG_MANUFACTURER_ID); + fi.dev_id = readw(onenand_base + ONENAND_REG_DEVICE_ID); + fi.ver_id = ver; + freq = cfg->get_freq(&fi, clk_dep); + if (freq) + return freq; + } + + switch ((ver >> 4) & 0xf) { + case 0: + freq = 40; + break; + case 1: + freq = 54; + break; + case 2: + freq = 66; + break; + case 3: + freq = 83; + break; + case 4: + freq = 104; + break; + default: + freq = 54; + break; + } + + return freq; +} + static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, void __iomem *onenand_base, - int freq) + int *freq_ptr) { struct gpmc_timings t; const int t_cer = 15; @@ -130,10 +175,11 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, const int t_wph = 30; int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo; int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency; - int first_time = 0, hf = 0, sync_read = 0, sync_write = 0; + int first_time = 0, hf = 0, vhf = 0, sync_read = 0, sync_write = 0; int err, ticks_cez; - int cs = cfg->cs; + int cs = cfg->cs, freq = *freq_ptr; u32 reg; + bool clk_dep = false; if (cfg->flags & ONENAND_SYNC_READ) { sync_read = 1; @@ -148,27 +194,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, err = omap2_onenand_set_async_mode(cs, onenand_base); if (err) return err; - reg = readw(onenand_base + ONENAND_REG_VERSION_ID); - switch ((reg >> 4) & 0xf) { - case 0: - freq = 40; - break; - case 1: - freq = 54; - break; - case 2: - freq = 66; - break; - case 3: - freq = 83; - break; - case 4: - freq = 104; - break; - default: - freq = 54; - break; - } + freq = omap2_onenand_get_freq(cfg, onenand_base, &clk_dep); first_time = 1; } @@ -180,7 +206,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, t_avdh = 2; t_ach = 3; t_aavdh = 6; - t_rdyo = 9; + t_rdyo = 6; break; case 83: min_gpmc_clk_period = 12000; /* 83 MHz */ @@ -217,16 +243,36 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, gpmc_clk_ns = gpmc_ticks_to_ns(div); if (gpmc_clk_ns < 15) /* >66Mhz */ hf = 1; - if (hf) + if (gpmc_clk_ns < 12) /* >83Mhz */ + vhf = 1; + if (vhf) + latency = 8; + else if (hf) latency = 6; else if (gpmc_clk_ns >= 25) /* 40 MHz*/ latency = 3; else latency = 4; + if (clk_dep) { + if (gpmc_clk_ns < 12) { /* >83Mhz */ + t_ces = 3; + t_avds = 4; + } else if (gpmc_clk_ns < 15) { /* >66Mhz */ + t_ces = 5; + t_avds = 4; + } else if (gpmc_clk_ns < 25) { /* >40Mhz */ + t_ces = 6; + t_avds = 5; + } else { + t_ces = 7; + t_avds = 7; + } + } + if (first_time) set_onenand_cfg(onenand_base, 
latency, - sync_read, sync_write, hf); + sync_read, sync_write, hf, vhf); if (div == 1) { reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2); @@ -264,6 +310,9 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, /* Read */ t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh)); t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach)); + /* Force at least 1 clk between AVD High to OE Low */ + if (t.oe_on <= t.adv_rd_off) + t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(1); t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div); t.oe_off = t.access + gpmc_round_ns_to_ticks(1); t.cs_rd_off = t.oe_off; @@ -317,18 +366,20 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, if (err) return err; - set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf); + set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf, vhf); + + *freq_ptr = freq; return 0; } -static int gpmc_onenand_setup(void __iomem *onenand_base, int freq) +static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr) { struct device *dev = &gpmc_onenand_device.dev; /* Set sync timings in GPMC */ if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base, - freq) < 0) { + freq_ptr) < 0) { dev_err(dev, "Unable to set synchronous mode\n"); return -EINVAL; } diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 1b7b3e7d02f7..674174365f78 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -14,6 +14,7 @@ */ #undef DEBUG +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> @@ -22,6 +23,7 @@ #include <linux/spinlock.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/interrupt.h> #include <asm/mach-types.h> #include <plat/gpmc.h> @@ -58,7 +60,6 @@ #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ #define GPMC_SECTION_SHIFT 28 /* 128 MB */ -#define PREFETCH_FIFOTHRESHOLD (0x40 << 8) #define CS_NUM_SHIFT 24 #define ENABLE_PREFETCH (0x1 << 7) #define DMA_MPU_MODE 2 @@ -100,6 +101,8 @@ static void __iomem *gpmc_base; static struct clk *gpmc_l3_clk; +static irqreturn_t gpmc_handle_irq(int irq, void *dev); + static void gpmc_write_reg(int idx, u32 val) { __raw_writel(val, gpmc_base + idx); @@ -497,6 +500,10 @@ int gpmc_cs_configure(int cs, int cmd, int wval) u32 regval = 0; switch (cmd) { + case GPMC_ENABLE_IRQ: + gpmc_write_reg(GPMC_IRQENABLE, wval); + break; + case GPMC_SET_IRQ_STATUS: gpmc_write_reg(GPMC_IRQSTATUS, wval); break; @@ -598,15 +605,19 @@ EXPORT_SYMBOL(gpmc_nand_write); /** * gpmc_prefetch_enable - configures and starts prefetch transfer * @cs: cs (chip select) number + * @fifo_th: fifo threshold to be used for read/ write * @dma_mode: dma mode enable (1) or disable (0) * @u32_count: number of bytes to be transferred * @is_write: prefetch read(0) or write post(1) mode */ -int gpmc_prefetch_enable(int cs, int dma_mode, +int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode, unsigned int u32_count, int is_write) { - if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) { + if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) { + pr_err("gpmc: fifo threshold is not supported\n"); + return -1; + } else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) { /* Set the amount of bytes to be prefetched */ gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count); @@ -614,7 +625,7 @@ int gpmc_prefetch_enable(int cs, int dma_mode, * enable the engine. Set which cs is has requested for. 
*/ gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) | - PREFETCH_FIFOTHRESHOLD | + PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH | (dma_mode << DMA_MPU_MODE) | (0x1 & is_write))); @@ -678,9 +689,10 @@ static void __init gpmc_mem_init(void) } } -void __init gpmc_init(void) +static int __init gpmc_init(void) { - u32 l; + u32 l, irq; + int cs, ret = -EINVAL; char *ck = NULL; if (cpu_is_omap24xx()) { @@ -698,7 +710,7 @@ void __init gpmc_init(void) } if (WARN_ON(!ck)) - return; + return ret; gpmc_l3_clk = clk_get(NULL, ck); if (IS_ERR(gpmc_l3_clk)) { @@ -723,6 +735,36 @@ void __init gpmc_init(void) l |= (0x02 << 3) | (1 << 0); gpmc_write_reg(GPMC_SYSCONFIG, l); gpmc_mem_init(); + + /* initalize the irq_chained */ + irq = OMAP_GPMC_IRQ_BASE; + for (cs = 0; cs < GPMC_CS_NUM; cs++) { + set_irq_handler(irq, handle_simple_irq); + set_irq_flags(irq, IRQF_VALID); + irq++; + } + + ret = request_irq(INT_34XX_GPMC_IRQ, + gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base); + if (ret) + pr_err("gpmc: irq-%d could not claim: err %d\n", + INT_34XX_GPMC_IRQ, ret); + return ret; +} +postcore_initcall(gpmc_init); + +static irqreturn_t gpmc_handle_irq(int irq, void *dev) +{ + u8 cs; + + if (irq != INT_34XX_GPMC_IRQ) + return IRQ_HANDLED; + /* check cs to invoke the irq */ + cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7; + if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END) + generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs); + + return IRQ_HANDLED; } #ifdef CONFIG_ARCH_OMAP3 diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 34272e4863fd..5496bc7d40ad 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -350,6 +350,11 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) mmc->slots[0].after_set_reg = NULL; } break; + case 4: + case 5: + mmc->slots[0].before_set_reg = NULL; + mmc->slots[0].after_set_reg = NULL; + break; default: pr_err("MMC%d configuration not supported!\n", c->mmc); kfree(mmc); diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c new file mode 100644 index 000000000000..06d4a80660a5 --- /dev/null +++ b/arch/arm/mach-omap2/hwspinlock.c @@ -0,0 +1,63 @@ +/* + * OMAP hardware spinlock device initialization + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Simon Que <[email protected]> + * Hari Kanigeri <[email protected]> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> + +#include <plat/omap_hwmod.h> +#include <plat/omap_device.h> + +struct omap_device_pm_latency omap_spinlock_latency[] = { + { + .deactivate_func = omap_device_idle_hwmods, + .activate_func = omap_device_enable_hwmods, + .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, + } +}; + +int __init hwspinlocks_init(void) +{ + int retval = 0; + struct omap_hwmod *oh; + struct omap_device *od; + const char *oh_name = "spinlock"; + const char *dev_name = "omap_hwspinlock"; + + /* + * Hwmod lookup will fail in case our platform doesn't support the + * hardware spinlock module, so it is safe to run this initcall + * on all omaps + */ + oh = omap_hwmod_lookup(oh_name); + if (oh == NULL) + return -EINVAL; + + od = omap_device_build(dev_name, 0, oh, NULL, 0, + omap_spinlock_latency, + ARRAY_SIZE(omap_spinlock_latency), false); + if (IS_ERR(od)) { + pr_err("Can't build omap_device for %s:%s\n", dev_name, + oh_name); + retval = PTR_ERR(od); + } + + return retval; +} +/* early board code might need to reserve specific hwspinlock instances */ +postcore_initcall(hwspinlocks_init); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index b8b49e4ae928..657f3c84687c 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -30,7 +30,6 @@ #include <plat/sram.h> #include <plat/sdrc.h> -#include <plat/gpmc.h> #include <plat/serial.h> #include "clock2xxx.h" @@ -422,7 +421,6 @@ void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0, omap2_sdrc_init(sdrc_cs0, sdrc_cs1); _omap2_init_reprogram_sdrc(); } - gpmc_init(); omap_irq_base_init(); } diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index b85c630b64d6..7fffd340c76f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -18,6 +18,7 @@ #include <plat/serial.h> #include <plat/i2c.h> #include <plat/gpio.h> +#include <plat/mcspi.h> #include "omap_hwmod_common_data.h" @@ -44,6 +45,8 @@ static struct omap_hwmod omap2420_gpio2_hwmod; static struct omap_hwmod omap2420_gpio3_hwmod; static struct omap_hwmod omap2420_gpio4_hwmod; static struct omap_hwmod omap2420_dma_system_hwmod; +static struct omap_hwmod omap2420_mcspi1_hwmod; +static struct omap_hwmod omap2420_mcspi2_hwmod; /* L3 -> L4_CORE interface */ static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = { @@ -88,6 +91,42 @@ static struct omap_hwmod omap2420_uart3_hwmod; static struct omap_hwmod omap2420_i2c1_hwmod; static struct omap_hwmod omap2420_i2c2_hwmod; +/* l4 core -> mcspi1 interface */ +static struct omap_hwmod_addr_space omap2420_mcspi1_addr_space[] = { + { + .pa_start = 0x48098000, + .pa_end = 0x480980ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi1 = { + .master = &omap2420_l4_core_hwmod, + .slave = &omap2420_mcspi1_hwmod, + .clk = "mcspi1_ick", + .addr = omap2420_mcspi1_addr_space, + .addr_cnt = ARRAY_SIZE(omap2420_mcspi1_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi2 interface */ +static struct omap_hwmod_addr_space omap2420_mcspi2_addr_space[] = { + { + .pa_start = 0x4809a000, + .pa_end = 0x4809a0ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi2 = { + .master = &omap2420_l4_core_hwmod, + .slave = &omap2420_mcspi2_hwmod, + .clk = "mcspi2_ick", + .addr = omap2420_mcspi2_addr_space, + .addr_cnt = ARRAY_SIZE(omap2420_mcspi2_addr_space), + .user = 
OCP_USER_MPU | OCP_USER_SDMA, +}; + /* L4_CORE -> L4_WKUP interface */ static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = { .master = &omap2420_l4_core_hwmod, @@ -864,6 +903,119 @@ static struct omap_hwmod omap2420_dma_system_hwmod = { .flags = HWMOD_NO_IDLEST, }; +/* + * 'mcspi' class + * multichannel serial port interface (mcspi) / master/slave synchronous serial + * bus + */ + +static struct omap_hwmod_class_sysconfig omap2420_mcspi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap2420_mcspi_class = { + .name = "mcspi", + .sysc = &omap2420_mcspi_sysc, + .rev = OMAP2_MCSPI_REV, +}; + +/* mcspi1 */ +static struct omap_hwmod_irq_info omap2420_mcspi1_mpu_irqs[] = { + { .irq = 65 }, +}; + +static struct omap_hwmod_dma_info omap2420_mcspi1_sdma_reqs[] = { + { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */ + { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */ + { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */ + { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */ + { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */ + { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */ + { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */ + { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */ +}; + +static struct omap_hwmod_ocp_if *omap2420_mcspi1_slaves[] = { + &omap2420_l4_core__mcspi1, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = { + .num_chipselect = 4, +}; + +static struct omap_hwmod omap2420_mcspi1_hwmod = { + .name = "mcspi1_hwmod", + .mpu_irqs = omap2420_mcspi1_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi1_mpu_irqs), + .sdma_reqs = omap2420_mcspi1_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi1_sdma_reqs), + .main_clk = "mcspi1_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP24XX_EN_MCSPI1_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT, + }, + }, + .slaves = omap2420_mcspi1_slaves, + .slaves_cnt = ARRAY_SIZE(omap2420_mcspi1_slaves), + .class = &omap2420_mcspi_class, + .dev_attr = &omap_mcspi1_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), +}; + +/* mcspi2 */ +static struct omap_hwmod_irq_info omap2420_mcspi2_mpu_irqs[] = { + { .irq = 66 }, +}; + +static struct omap_hwmod_dma_info omap2420_mcspi2_sdma_reqs[] = { + { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */ + { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */ + { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */ + { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */ +}; + +static struct omap_hwmod_ocp_if *omap2420_mcspi2_slaves[] = { + &omap2420_l4_core__mcspi2, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = { + .num_chipselect = 2, +}; + +static struct omap_hwmod omap2420_mcspi2_hwmod = { + .name = "mcspi2_hwmod", + .mpu_irqs = omap2420_mcspi2_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi2_mpu_irqs), + .sdma_reqs = omap2420_mcspi2_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi2_sdma_reqs), + .main_clk = "mcspi2_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP24XX_EN_MCSPI2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT, + }, + }, + .slaves = omap2420_mcspi2_slaves, + 
.slaves_cnt = ARRAY_SIZE(omap2420_mcspi2_slaves), + .class = &omap2420_mcspi_class, + .dev_attr = &omap_mcspi2_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), +}; + static __initdata struct omap_hwmod *omap2420_hwmods[] = { &omap2420_l3_main_hwmod, &omap2420_l4_core_hwmod, @@ -885,6 +1037,10 @@ static __initdata struct omap_hwmod *omap2420_hwmods[] = { /* dma_system class*/ &omap2420_dma_system_hwmod, + + /* mcspi class */ + &omap2420_mcspi1_hwmod, + &omap2420_mcspi2_hwmod, NULL, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index 8ecfbcde13ba..7ba688a1c840 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -18,6 +18,7 @@ #include <plat/serial.h> #include <plat/i2c.h> #include <plat/gpio.h> +#include <plat/mcspi.h> #include "omap_hwmod_common_data.h" @@ -45,6 +46,9 @@ static struct omap_hwmod omap2430_gpio3_hwmod; static struct omap_hwmod omap2430_gpio4_hwmod; static struct omap_hwmod omap2430_gpio5_hwmod; static struct omap_hwmod omap2430_dma_system_hwmod; +static struct omap_hwmod omap2430_mcspi1_hwmod; +static struct omap_hwmod omap2430_mcspi2_hwmod; +static struct omap_hwmod omap2430_mcspi3_hwmod; /* L3 -> L4_CORE interface */ static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = { @@ -89,6 +93,16 @@ static struct omap_hwmod omap2430_uart3_hwmod; static struct omap_hwmod omap2430_i2c1_hwmod; static struct omap_hwmod omap2430_i2c2_hwmod; +static struct omap_hwmod omap2430_usbhsotg_hwmod; + +/* l3_core -> usbhsotg interface */ +static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = { + .master = &omap2430_usbhsotg_hwmod, + .slave = &omap2430_l3_main_hwmod, + .clk = "core_l3_ck", + .user = OCP_USER_MPU, +}; + /* I2C IP block address space length (in bytes) */ #define OMAP2_I2C_AS_LEN 128 @@ -189,6 +203,35 @@ static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; +/* +* usbhsotg interface data +*/ +static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = { + { + .pa_start = OMAP243X_HS_BASE, + .pa_end = OMAP243X_HS_BASE + SZ_4K - 1, + .flags = ADDR_TYPE_RT + }, +}; + +/* l4_core ->usbhsotg interface */ +static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = { + .master = &omap2430_l4_core_hwmod, + .slave = &omap2430_usbhsotg_hwmod, + .clk = "usb_l4_ick", + .addr = omap2430_usbhsotg_addrs, + .addr_cnt = ARRAY_SIZE(omap2430_usbhsotg_addrs), + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if *omap2430_usbhsotg_masters[] = { + &omap2430_usbhsotg__l3, +}; + +static struct omap_hwmod_ocp_if *omap2430_usbhsotg_slaves[] = { + &omap2430_l4_core__usbhsotg, +}; + /* Slave interfaces on the L4_CORE interconnect */ static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = { &omap2430_l3_main__l4_core, @@ -223,6 +266,60 @@ static struct omap_hwmod_ocp_if *omap2430_l4_wkup_slaves[] = { static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = { }; +/* l4 core -> mcspi1 interface */ +static struct omap_hwmod_addr_space omap2430_mcspi1_addr_space[] = { + { + .pa_start = 0x48098000, + .pa_end = 0x480980ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi1 = { + .master = &omap2430_l4_core_hwmod, + .slave = &omap2430_mcspi1_hwmod, + .clk = "mcspi1_ick", + .addr = omap2430_mcspi1_addr_space, + .addr_cnt = ARRAY_SIZE(omap2430_mcspi1_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi2 interface */ +static struct omap_hwmod_addr_space 
omap2430_mcspi2_addr_space[] = { + { + .pa_start = 0x4809a000, + .pa_end = 0x4809a0ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi2 = { + .master = &omap2430_l4_core_hwmod, + .slave = &omap2430_mcspi2_hwmod, + .clk = "mcspi2_ick", + .addr = omap2430_mcspi2_addr_space, + .addr_cnt = ARRAY_SIZE(omap2430_mcspi2_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi3 interface */ +static struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = { + { + .pa_start = 0x480b8000, + .pa_end = 0x480b80ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = { + .master = &omap2430_l4_core_hwmod, + .slave = &omap2430_mcspi3_hwmod, + .clk = "mcspi3_ick", + .addr = omap2430_mcspi3_addr_space, + .addr_cnt = ARRAY_SIZE(omap2430_mcspi3_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + /* L4 WKUP */ static struct omap_hwmod omap2430_l4_wkup_hwmod = { .name = "l4_wkup", @@ -919,6 +1016,220 @@ static struct omap_hwmod omap2430_dma_system_hwmod = { .flags = HWMOD_NO_IDLEST, }; +/* + * 'mcspi' class + * multichannel serial port interface (mcspi) / master/slave synchronous serial + * bus + */ + +static struct omap_hwmod_class_sysconfig omap2430_mcspi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap2430_mcspi_class = { + .name = "mcspi", + .sysc = &omap2430_mcspi_sysc, + .rev = OMAP2_MCSPI_REV, +}; + +/* mcspi1 */ +static struct omap_hwmod_irq_info omap2430_mcspi1_mpu_irqs[] = { + { .irq = 65 }, +}; + +static struct omap_hwmod_dma_info omap2430_mcspi1_sdma_reqs[] = { + { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */ + { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */ + { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */ + { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */ + { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */ + { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */ + { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */ + { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */ +}; + +static struct omap_hwmod_ocp_if *omap2430_mcspi1_slaves[] = { + &omap2430_l4_core__mcspi1, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = { + .num_chipselect = 4, +}; + +static struct omap_hwmod omap2430_mcspi1_hwmod = { + .name = "mcspi1_hwmod", + .mpu_irqs = omap2430_mcspi1_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi1_mpu_irqs), + .sdma_reqs = omap2430_mcspi1_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi1_sdma_reqs), + .main_clk = "mcspi1_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP24XX_EN_MCSPI1_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT, + }, + }, + .slaves = omap2430_mcspi1_slaves, + .slaves_cnt = ARRAY_SIZE(omap2430_mcspi1_slaves), + .class = &omap2430_mcspi_class, + .dev_attr = &omap_mcspi1_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), +}; + +/* mcspi2 */ +static struct omap_hwmod_irq_info omap2430_mcspi2_mpu_irqs[] = { + { .irq = 66 }, +}; + +static struct omap_hwmod_dma_info omap2430_mcspi2_sdma_reqs[] = { + { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */ + { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */ + { .name = 
"tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */ + { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */ +}; + +static struct omap_hwmod_ocp_if *omap2430_mcspi2_slaves[] = { + &omap2430_l4_core__mcspi2, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = { + .num_chipselect = 2, +}; + +static struct omap_hwmod omap2430_mcspi2_hwmod = { + .name = "mcspi2_hwmod", + .mpu_irqs = omap2430_mcspi2_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi2_mpu_irqs), + .sdma_reqs = omap2430_mcspi2_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi2_sdma_reqs), + .main_clk = "mcspi2_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP24XX_EN_MCSPI2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT, + }, + }, + .slaves = omap2430_mcspi2_slaves, + .slaves_cnt = ARRAY_SIZE(omap2430_mcspi2_slaves), + .class = &omap2430_mcspi_class, + .dev_attr = &omap_mcspi2_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), +}; + +/* mcspi3 */ +static struct omap_hwmod_irq_info omap2430_mcspi3_mpu_irqs[] = { + { .irq = 91 }, +}; + +static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = { + { .name = "tx0", .dma_req = 15 }, /* DMA_SPI3_TX0 */ + { .name = "rx0", .dma_req = 16 }, /* DMA_SPI3_RX0 */ + { .name = "tx1", .dma_req = 23 }, /* DMA_SPI3_TX1 */ + { .name = "rx1", .dma_req = 24 }, /* DMA_SPI3_RX1 */ +}; + +static struct omap_hwmod_ocp_if *omap2430_mcspi3_slaves[] = { + &omap2430_l4_core__mcspi3, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = { + .num_chipselect = 2, +}; + +static struct omap_hwmod omap2430_mcspi3_hwmod = { + .name = "mcspi3_hwmod", + .mpu_irqs = omap2430_mcspi3_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi3_mpu_irqs), + .sdma_reqs = omap2430_mcspi3_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi3_sdma_reqs), + .main_clk = "mcspi3_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 2, + .module_bit = OMAP2430_EN_MCSPI3_SHIFT, + .idlest_reg_id = 2, + .idlest_idle_bit = OMAP2430_ST_MCSPI3_SHIFT, + }, + }, + .slaves = omap2430_mcspi3_slaves, + .slaves_cnt = ARRAY_SIZE(omap2430_mcspi3_slaves), + .class = &omap2430_mcspi_class, + .dev_attr = &omap_mcspi3_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), +}; + +/* + * usbhsotg + */ +static struct omap_hwmod_class_sysconfig omap2430_usbhsotg_sysc = { + .rev_offs = 0x0400, + .sysc_offs = 0x0404, + .syss_offs = 0x0408, + .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE| + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class usbotg_class = { + .name = "usbotg", + .sysc = &omap2430_usbhsotg_sysc, +}; + +/* usb_otg_hs */ +static struct omap_hwmod_irq_info omap2430_usbhsotg_mpu_irqs[] = { + + { .name = "mc", .irq = 92 }, + { .name = "dma", .irq = 93 }, +}; + +static struct omap_hwmod omap2430_usbhsotg_hwmod = { + .name = "usb_otg_hs", + .mpu_irqs = omap2430_usbhsotg_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap2430_usbhsotg_mpu_irqs), + .main_clk = "usbhs_ick", + .prcm = { + .omap2 = { + .prcm_reg_id = 1, + .module_bit = OMAP2430_EN_USBHS_MASK, + .module_offs = CORE_MOD, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP2430_ST_USBHS_SHIFT, + }, + }, + .masters = omap2430_usbhsotg_masters, + .masters_cnt = ARRAY_SIZE(omap2430_usbhsotg_masters), + .slaves = omap2430_usbhsotg_slaves, + .slaves_cnt = 
ARRAY_SIZE(omap2430_usbhsotg_slaves), + .class = &usbotg_class, + /* + * Erratum ID: i479 idle_req / idle_ack mechanism potentially + * broken when autoidle is enabled + * workaround is to disable the autoidle bit at module level. + */ + .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE + | HWMOD_SWSUP_MSTANDBY, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430) +}; + + + static __initdata struct omap_hwmod *omap2430_hwmods[] = { &omap2430_l3_main_hwmod, &omap2430_l4_core_hwmod, @@ -941,6 +1252,15 @@ static __initdata struct omap_hwmod *omap2430_hwmods[] = { /* dma_system class*/ &omap2430_dma_system_hwmod, + + /* mcspi class */ + &omap2430_mcspi1_hwmod, + &omap2430_mcspi2_hwmod, + &omap2430_mcspi3_hwmod, + + /* usbotg class*/ + &omap2430_usbhsotg_hwmod, + NULL, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 8d8181334f86..879f55f272e2 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -22,12 +22,14 @@ #include <plat/i2c.h> #include <plat/gpio.h> #include <plat/smartreflex.h> +#include <plat/mcspi.h> #include "omap_hwmod_common_data.h" #include "prm-regbits-34xx.h" #include "cm-regbits-34xx.h" #include "wd_timer.h" +#include <mach/am35xx.h> /* * OMAP3xxx hardware module integration data @@ -55,6 +57,11 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod; static struct omap_hwmod omap3xxx_gpio6_hwmod; static struct omap_hwmod omap34xx_sr1_hwmod; static struct omap_hwmod omap34xx_sr2_hwmod; +static struct omap_hwmod omap34xx_mcspi1; +static struct omap_hwmod omap34xx_mcspi2; +static struct omap_hwmod omap34xx_mcspi3; +static struct omap_hwmod omap34xx_mcspi4; +static struct omap_hwmod am35xx_usbhsotg_hwmod; static struct omap_hwmod omap3xxx_dma_system_hwmod; @@ -107,7 +114,23 @@ static struct omap_hwmod omap3xxx_uart1_hwmod; static struct omap_hwmod omap3xxx_uart2_hwmod; static struct omap_hwmod omap3xxx_uart3_hwmod; static struct omap_hwmod omap3xxx_uart4_hwmod; +static struct omap_hwmod omap3xxx_usbhsotg_hwmod; +/* l3_core -> usbhsotg interface */ +static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = { + .master = &omap3xxx_usbhsotg_hwmod, + .slave = &omap3xxx_l3_main_hwmod, + .clk = "core_l3_ick", + .user = OCP_USER_MPU, +}; + +/* l3_core -> am35xx_usbhsotg interface */ +static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = { + .master = &am35xx_usbhsotg_hwmod, + .slave = &omap3xxx_l3_main_hwmod, + .clk = "core_l3_ick", + .user = OCP_USER_MPU, +}; /* L4_CORE -> L4_WKUP interface */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = { .master = &omap3xxx_l4_core_hwmod, @@ -301,6 +324,61 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = { .user = OCP_USER_MPU, }; +/* +* usbhsotg interface data +*/ + +static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = { + { + .pa_start = OMAP34XX_HSUSB_OTG_BASE, + .pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1, + .flags = ADDR_TYPE_RT + }, +}; + +/* l4_core -> usbhsotg */ +static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap3xxx_usbhsotg_hwmod, + .clk = "l4_ick", + .addr = omap3xxx_usbhsotg_addrs, + .addr_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_addrs), + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = { + &omap3xxx_usbhsotg__l3, +}; + +static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = { + &omap3xxx_l4_core__usbhsotg, +}; + +static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = { + { + .pa_start = 
AM35XX_IPSS_USBOTGSS_BASE, + .pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1, + .flags = ADDR_TYPE_RT + }, +}; + +/* l4_core -> usbhsotg */ +static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &am35xx_usbhsotg_hwmod, + .clk = "l4_ick", + .addr = am35xx_usbhsotg_addrs, + .addr_cnt = ARRAY_SIZE(am35xx_usbhsotg_addrs), + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = { + &am35xx_usbhsotg__l3, +}; + +static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = { + &am35xx_l4_core__usbhsotg, +}; /* Slave interfaces on the L4_CORE interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = { &omap3xxx_l3_main__l4_core, @@ -1356,6 +1434,360 @@ static struct omap_hwmod omap36xx_sr2_hwmod = { .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1), }; +/* l4 core -> mcspi1 interface */ +static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = { + { + .pa_start = 0x48098000, + .pa_end = 0x480980ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap34xx_mcspi1, + .clk = "mcspi1_ick", + .addr = omap34xx_mcspi1_addr_space, + .addr_cnt = ARRAY_SIZE(omap34xx_mcspi1_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi2 interface */ +static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = { + { + .pa_start = 0x4809a000, + .pa_end = 0x4809a0ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap34xx_mcspi2, + .clk = "mcspi2_ick", + .addr = omap34xx_mcspi2_addr_space, + .addr_cnt = ARRAY_SIZE(omap34xx_mcspi2_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi3 interface */ +static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = { + { + .pa_start = 0x480b8000, + .pa_end = 0x480b80ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap34xx_mcspi3, + .clk = "mcspi3_ick", + .addr = omap34xx_mcspi3_addr_space, + .addr_cnt = ARRAY_SIZE(omap34xx_mcspi3_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l4 core -> mcspi4 interface */ +static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = { + { + .pa_start = 0x480ba000, + .pa_end = 0x480ba0ff, + .flags = ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap34xx_mcspi4, + .clk = "mcspi4_ick", + .addr = omap34xx_mcspi4_addr_space, + .addr_cnt = ARRAY_SIZE(omap34xx_mcspi4_addr_space), + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* + * 'mcspi' class + * multichannel serial port interface (mcspi) / master/slave synchronous serial + * bus + */ + +static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap34xx_mcspi_class = { + .name = "mcspi", + .sysc = &omap34xx_mcspi_sysc, + .rev = OMAP3_MCSPI_REV, +}; + +/* mcspi1 */ +static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = { + { .name = "irq", .irq = 65 
}, +}; + +static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = { + { .name = "tx0", .dma_req = 35 }, + { .name = "rx0", .dma_req = 36 }, + { .name = "tx1", .dma_req = 37 }, + { .name = "rx1", .dma_req = 38 }, + { .name = "tx2", .dma_req = 39 }, + { .name = "rx2", .dma_req = 40 }, + { .name = "tx3", .dma_req = 41 }, + { .name = "rx3", .dma_req = 42 }, +}; + +static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = { + &omap34xx_l4_core__mcspi1, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = { + .num_chipselect = 4, +}; + +static struct omap_hwmod omap34xx_mcspi1 = { + .name = "mcspi1", + .mpu_irqs = omap34xx_mcspi1_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs), + .sdma_reqs = omap34xx_mcspi1_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs), + .main_clk = "mcspi1_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MCSPI1_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT, + }, + }, + .slaves = omap34xx_mcspi1_slaves, + .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi1_slaves), + .class = &omap34xx_mcspi_class, + .dev_attr = &omap_mcspi1_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), +}; + +/* mcspi2 */ +static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = { + { .name = "irq", .irq = 66 }, +}; + +static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = { + { .name = "tx0", .dma_req = 43 }, + { .name = "rx0", .dma_req = 44 }, + { .name = "tx1", .dma_req = 45 }, + { .name = "rx1", .dma_req = 46 }, +}; + +static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = { + &omap34xx_l4_core__mcspi2, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = { + .num_chipselect = 2, +}; + +static struct omap_hwmod omap34xx_mcspi2 = { + .name = "mcspi2", + .mpu_irqs = omap34xx_mcspi2_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs), + .sdma_reqs = omap34xx_mcspi2_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs), + .main_clk = "mcspi2_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MCSPI2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT, + }, + }, + .slaves = omap34xx_mcspi2_slaves, + .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi2_slaves), + .class = &omap34xx_mcspi_class, + .dev_attr = &omap_mcspi2_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), +}; + +/* mcspi3 */ +static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = { + { .name = "irq", .irq = 91 }, /* 91 */ +}; + +static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = { + { .name = "tx0", .dma_req = 15 }, + { .name = "rx0", .dma_req = 16 }, + { .name = "tx1", .dma_req = 23 }, + { .name = "rx1", .dma_req = 24 }, +}; + +static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = { + &omap34xx_l4_core__mcspi3, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = { + .num_chipselect = 2, +}; + +static struct omap_hwmod omap34xx_mcspi3 = { + .name = "mcspi3", + .mpu_irqs = omap34xx_mcspi3_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs), + .sdma_reqs = omap34xx_mcspi3_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs), + .main_clk = "mcspi3_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MCSPI3_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT, + }, + }, + .slaves = omap34xx_mcspi3_slaves, + .slaves_cnt = 
ARRAY_SIZE(omap34xx_mcspi3_slaves), + .class = &omap34xx_mcspi_class, + .dev_attr = &omap_mcspi3_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), +}; + +/* SPI4 */ +static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = { + { .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */ +}; + +static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = { + { .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */ + { .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */ +}; + +static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = { + &omap34xx_l4_core__mcspi4, +}; + +static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = { + .num_chipselect = 1, +}; + +static struct omap_hwmod omap34xx_mcspi4 = { + .name = "mcspi4", + .mpu_irqs = omap34xx_mcspi4_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs), + .sdma_reqs = omap34xx_mcspi4_sdma_reqs, + .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs), + .main_clk = "mcspi4_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MCSPI4_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT, + }, + }, + .slaves = omap34xx_mcspi4_slaves, + .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi4_slaves), + .class = &omap34xx_mcspi_class, + .dev_attr = &omap_mcspi4_dev_attr, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), +}; + +/* + * usbhsotg + */ +static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = { + .rev_offs = 0x0400, + .sysc_offs = 0x0404, + .syss_offs = 0x0408, + .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE| + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class usbotg_class = { + .name = "usbotg", + .sysc = &omap3xxx_usbhsotg_sysc, +}; +/* usb_otg_hs */ +static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = { + + { .name = "mc", .irq = 92 }, + { .name = "dma", .irq = 93 }, +}; + +static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { + .name = "usb_otg_hs", + .mpu_irqs = omap3xxx_usbhsotg_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs), + .main_clk = "hsotgusb_ick", + .prcm = { + .omap2 = { + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_HSOTGUSB_SHIFT, + .module_offs = CORE_MOD, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT, + .idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT + }, + }, + .masters = omap3xxx_usbhsotg_masters, + .masters_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_masters), + .slaves = omap3xxx_usbhsotg_slaves, + .slaves_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_slaves), + .class = &usbotg_class, + + /* + * Erratum ID: i479 idle_req / idle_ack mechanism potentially + * broken when autoidle is enabled + * workaround is to disable the autoidle bit at module level. 
+ */ + .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE + | HWMOD_SWSUP_MSTANDBY, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) +}; + +/* usb_otg_hs */ +static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = { + + { .name = "mc", .irq = 71 }, +}; + +static struct omap_hwmod_class am35xx_usbotg_class = { + .name = "am35xx_usbotg", + .sysc = NULL, +}; + +static struct omap_hwmod am35xx_usbhsotg_hwmod = { + .name = "am35x_otg_hs", + .mpu_irqs = am35xx_usbhsotg_mpu_irqs, + .mpu_irqs_cnt = ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs), + .main_clk = NULL, + .prcm = { + .omap2 = { + }, + }, + .masters = am35xx_usbhsotg_masters, + .masters_cnt = ARRAY_SIZE(am35xx_usbhsotg_masters), + .slaves = am35xx_usbhsotg_slaves, + .slaves_cnt = ARRAY_SIZE(am35xx_usbhsotg_slaves), + .class = &am35xx_usbotg_class, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1) +}; + static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { &omap3xxx_l3_main_hwmod, &omap3xxx_l4_core_hwmod, @@ -1387,6 +1819,19 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { /* dma_system class*/ &omap3xxx_dma_system_hwmod, + + /* mcspi class */ + &omap34xx_mcspi1, + &omap34xx_mcspi2, + &omap34xx_mcspi3, + &omap34xx_mcspi4, + + /* usbotg class */ + &omap3xxx_usbhsotg_hwmod, + + /* usbotg for am35x */ + &am35xx_usbhsotg_hwmod, + NULL, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 84e795cf0648..79a860178913 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -24,6 +24,7 @@ #include <plat/cpu.h> #include <plat/gpio.h> #include <plat/dma.h> +#include <plat/mcspi.h> #include "omap_hwmod_common_data.h" @@ -3114,6 +3115,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = { static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = { .name = "mcspi", .sysc = &omap44xx_mcspi_sysc, + .rev = OMAP4_MCSPI_REV, }; /* mcspi1 */ @@ -3156,6 +3158,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi1_slaves[] = { &omap44xx_l4_per__mcspi1, }; +/* mcspi1 dev_attr */ +static struct omap2_mcspi_dev_attr mcspi1_dev_attr = { + .num_chipselect = 4, +}; + static struct omap_hwmod omap44xx_mcspi1_hwmod = { .name = "mcspi1", .class = &omap44xx_mcspi_hwmod_class, @@ -3169,6 +3176,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = { .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL, }, }, + .dev_attr = &mcspi1_dev_attr, .slaves = omap44xx_mcspi1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), @@ -3210,6 +3218,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi2_slaves[] = { &omap44xx_l4_per__mcspi2, }; +/* mcspi2 dev_attr */ +static struct omap2_mcspi_dev_attr mcspi2_dev_attr = { + .num_chipselect = 2, +}; + static struct omap_hwmod omap44xx_mcspi2_hwmod = { .name = "mcspi2", .class = &omap44xx_mcspi_hwmod_class, @@ -3223,6 +3236,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = { .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL, }, }, + .dev_attr = &mcspi2_dev_attr, .slaves = omap44xx_mcspi2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), @@ -3264,6 +3278,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi3_slaves[] = { &omap44xx_l4_per__mcspi3, }; +/* mcspi3 dev_attr */ +static struct omap2_mcspi_dev_attr mcspi3_dev_attr = { + .num_chipselect = 2, +}; + static struct omap_hwmod omap44xx_mcspi3_hwmod = { .name = "mcspi3", .class = &omap44xx_mcspi_hwmod_class, @@ -3277,6 +3296,7 @@ static struct 
omap_hwmod omap44xx_mcspi3_hwmod = { .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL, }, }, + .dev_attr = &mcspi3_dev_attr, .slaves = omap44xx_mcspi3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), @@ -3316,6 +3336,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi4_slaves[] = { &omap44xx_l4_per__mcspi4, }; +/* mcspi4 dev_attr */ +static struct omap2_mcspi_dev_attr mcspi4_dev_attr = { + .num_chipselect = 1, +}; + static struct omap_hwmod omap44xx_mcspi4_hwmod = { .name = "mcspi4", .class = &omap44xx_mcspi_hwmod_class, @@ -3329,6 +3354,7 @@ static struct omap_hwmod omap44xx_mcspi4_hwmod = { .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL, }, }, + .dev_attr = &mcspi4_dev_attr, .slaves = omap44xx_mcspi4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), @@ -3963,6 +3989,7 @@ static struct omap_hwmod_ocp_if *omap44xx_timer1_slaves[] = { static struct omap_hwmod omap44xx_timer1_hwmod = { .name = "timer1", .class = &omap44xx_timer_1ms_hwmod_class, + .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, .mpu_irqs = omap44xx_timer1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer1_irqs), .main_clk = "timer1_fck", diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index 745252c60e32..f172ec06c06a 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c @@ -29,6 +29,7 @@ #include <linux/usb.h> #include <plat/usb.h> +#include "control.h" /* OMAP control module register for UTMI PHY */ #define CONTROL_DEV_CONF 0x300 @@ -147,3 +148,95 @@ int omap4430_phy_exit(struct device *dev) return 0; } + +void am35x_musb_reset(void) +{ + u32 regval; + + /* Reset the musb interface */ + regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); + + regval |= AM35XX_USBOTGSS_SW_RST; + omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); + + regval &= ~AM35XX_USBOTGSS_SW_RST; + omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); + + regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); +} + +void am35x_musb_phy_power(u8 on) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u32 devconf2; + + if (on) { + /* + * Start the on-chip PHY and its PLL. + */ + devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); + + devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN); + devconf2 |= CONF2_PHY_PLLON; + + omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); + + pr_info(KERN_INFO "Waiting for PHY clock good...\n"); + while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2) + & CONF2_PHYCLKGD)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) { + pr_err(KERN_ERR "musb PHY clock good timed out\n"); + break; + } + } + } else { + /* + * Power down the on-chip PHY. 
+ */ + devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); + + devconf2 &= ~CONF2_PHY_PLLON; + devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN; + omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); + } +} + +void am35x_musb_clear_irq(void) +{ + u32 regval; + + regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); + regval |= AM35XX_USBOTGSS_INT_CLR; + omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR); + regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); +} + +void am35x_musb_set_mode(u8 musb_mode) +{ + u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); + + devconf2 &= ~CONF2_OTGMODE; + switch (musb_mode) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case MUSB_HOST: /* Force VBUS valid, ID = 0 */ + devconf2 |= CONF2_FORCE_HOST; + break; +#endif +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ + devconf2 |= CONF2_FORCE_DEVICE; + break; +#endif +#ifdef CONFIG_USB_MUSB_OTG + case MUSB_OTG: /* Don't override the VBUS/ID comparators */ + devconf2 |= CONF2_NO_OVERRIDE; + break; +#endif + default: + pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); + } + + omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); +} diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 5298949d4b11..a9d4d143086d 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -30,118 +30,11 @@ #include <mach/irqs.h> #include <mach/am35xx.h> #include <plat/usb.h> -#include "control.h" +#include <plat/omap_device.h> +#include "mux.h" #if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X) -static void am35x_musb_reset(void) -{ - u32 regval; - - /* Reset the musb interface */ - regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); - - regval |= AM35XX_USBOTGSS_SW_RST; - omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); - - regval &= ~AM35XX_USBOTGSS_SW_RST; - omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); - - regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); -} - -static void am35x_musb_phy_power(u8 on) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(100); - u32 devconf2; - - if (on) { - /* - * Start the on-chip PHY and its PLL. - */ - devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); - - devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN); - devconf2 |= CONF2_PHY_PLLON; - - omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); - - pr_info(KERN_INFO "Waiting for PHY clock good...\n"); - while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2) - & CONF2_PHYCLKGD)) { - cpu_relax(); - - if (time_after(jiffies, timeout)) { - pr_err(KERN_ERR "musb PHY clock good timed out\n"); - break; - } - } - } else { - /* - * Power down the on-chip PHY. 
- */ - devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); - - devconf2 &= ~CONF2_PHY_PLLON; - devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN; - omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); - } -} - -static void am35x_musb_clear_irq(void) -{ - u32 regval; - - regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); - regval |= AM35XX_USBOTGSS_INT_CLR; - omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR); - regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); -} - -static void am35x_musb_set_mode(u8 musb_mode) -{ - u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); - - devconf2 &= ~CONF2_OTGMODE; - switch (musb_mode) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case MUSB_HOST: /* Force VBUS valid, ID = 0 */ - devconf2 |= CONF2_FORCE_HOST; - break; -#endif -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ - devconf2 |= CONF2_FORCE_DEVICE; - break; -#endif -#ifdef CONFIG_USB_MUSB_OTG - case MUSB_OTG: /* Don't override the VBUS/ID comparators */ - devconf2 |= CONF2_NO_OVERRIDE; - break; -#endif - default: - pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); - } - - omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); -} - -static struct resource musb_resources[] = { - [0] = { /* start and end set dynamically */ - .flags = IORESOURCE_MEM, - }, - [1] = { /* general IRQ */ - .start = INT_243X_HS_USB_MC, - .flags = IORESOURCE_IRQ, - .name = "mc", - }, - [2] = { /* DMA IRQ */ - .start = INT_243X_HS_USB_DMA, - .flags = IORESOURCE_IRQ, - .name = "dma", - }, -}; - static struct musb_hdrc_config musb_config = { .multipoint = 1, .dyn_fifo = 1, @@ -169,38 +62,65 @@ static struct musb_hdrc_platform_data musb_plat = { static u64 musb_dmamask = DMA_BIT_MASK(32); -static struct platform_device musb_device = { - .name = "musb-omap2430", - .id = -1, - .dev = { - .dma_mask = &musb_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(32), - .platform_data = &musb_plat, +static struct omap_device_pm_latency omap_musb_latency[] = { + { + .deactivate_func = omap_device_idle_hwmods, + .activate_func = omap_device_enable_hwmods, + .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, }, - .num_resources = ARRAY_SIZE(musb_resources), - .resource = musb_resources, }; +static void usb_musb_mux_init(struct omap_musb_board_data *board_data) +{ + switch (board_data->interface_type) { + case MUSB_INTERFACE_UTMI: + omap_mux_init_signal("usba0_otg_dp", OMAP_PIN_INPUT); + omap_mux_init_signal("usba0_otg_dm", OMAP_PIN_INPUT); + break; + case MUSB_INTERFACE_ULPI: + omap_mux_init_signal("usba0_ulpiphy_clk", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_stp", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dir", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_nxt", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat0", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat1", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat2", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat3", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat4", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat5", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat6", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usba0_ulpiphy_dat7", + OMAP_PIN_INPUT_PULLDOWN); + break; + default: + break; + } +} + void __init usb_musb_init(struct omap_musb_board_data *board_data) { - if (cpu_is_omap243x()) { - musb_resources[0].start = 
OMAP243X_HS_BASE; - } else if (cpu_is_omap3517() || cpu_is_omap3505()) { - musb_device.name = "musb-am35x"; - musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE; - musb_resources[1].start = INT_35XX_USBOTG_IRQ; - board_data->set_phy_power = am35x_musb_phy_power; - board_data->clear_irq = am35x_musb_clear_irq; - board_data->set_mode = am35x_musb_set_mode; - board_data->reset = am35x_musb_reset; - } else if (cpu_is_omap34xx()) { - musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE; + struct omap_hwmod *oh; + struct omap_device *od; + struct platform_device *pdev; + struct device *dev; + int bus_id = -1; + const char *oh_name, *name; + + if (cpu_is_omap3517() || cpu_is_omap3505()) { } else if (cpu_is_omap44xx()) { - musb_resources[0].start = OMAP44XX_HSUSB_OTG_BASE; - musb_resources[1].start = OMAP44XX_IRQ_HS_USB_MC_N; - musb_resources[2].start = OMAP44XX_IRQ_HS_USB_DMA_N; + usb_musb_mux_init(board_data); } - musb_resources[0].end = musb_resources[0].start + SZ_4K - 1; /* * REVISIT: This line can be removed once all the platforms using @@ -212,8 +132,35 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data) musb_plat.mode = board_data->mode; musb_plat.extvbus = board_data->extvbus; - if (platform_device_register(&musb_device) < 0) - printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); + if (cpu_is_omap3517() || cpu_is_omap3505()) { + oh_name = "am35x_otg_hs"; + name = "musb-am35x"; + } else { + oh_name = "usb_otg_hs"; + name = "musb-omap2430"; + } + + oh = omap_hwmod_lookup(oh_name); + if (!oh) { + pr_err("Could not look up %s\n", oh_name); + return; + } + + od = omap_device_build(name, bus_id, oh, &musb_plat, + sizeof(musb_plat), omap_musb_latency, + ARRAY_SIZE(omap_musb_latency), false); + if (IS_ERR(od)) { + pr_err("Could not build omap_device for %s %s\n", + name, oh_name); + return; + } + + pdev = &od->pdev; + dev = &pdev->dev; + get_device(dev); + dev->dma_mask = &musb_dmamask; + dev->coherent_dma_mask = musb_dmamask; + put_device(dev); } #else diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h index 85ded598853e..12b316165037 100644 --- a/arch/arm/plat-omap/include/plat/gpmc.h +++ b/arch/arm/plat-omap/include/plat/gpmc.h @@ -41,6 +41,8 @@ #define GPMC_NAND_ADDRESS 0x0000000b #define GPMC_NAND_DATA 0x0000000c +#define GPMC_ENABLE_IRQ 0x0000000d + /* ECC commands */ #define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ #define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ @@ -78,6 +80,19 @@ #define WR_RD_PIN_MONITORING 0x00600000 #define GPMC_PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) #define GPMC_PREFETCH_STATUS_COUNT(val) (val & 0x00003fff) +#define GPMC_IRQ_FIFOEVENTENABLE 0x01 +#define GPMC_IRQ_COUNT_EVENT 0x02 + +#define PREFETCH_FIFOTHRESHOLD_MAX 0x40 +#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8) + +enum omap_ecc { + /* 1-bit ecc: stored at end of spare area */ + OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */ + OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */ + /* 1-bit ecc: stored at begining of spare area as romcode */ + OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */ +}; /* * Note that all values in this struct are in nanoseconds except sync_clk @@ -130,12 +145,11 @@ extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); extern void gpmc_cs_free(int cs); extern int gpmc_cs_set_reserved(int cs, int reserved); extern int gpmc_cs_reserved(int cs); -extern int gpmc_prefetch_enable(int cs, int dma_mode, +extern int 
gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode, unsigned int u32_count, int is_write); extern int gpmc_prefetch_reset(int cs); extern void omap3_gpmc_save_context(void); extern void omap3_gpmc_restore_context(void); -extern void gpmc_init(void); extern int gpmc_read_status(int cmd); extern int gpmc_cs_configure(int cs, int cmd, int wval); extern int gpmc_nand_read(int cs, int cmd); diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h index 69230d685538..19cbb5e9ece2 100644 --- a/arch/arm/plat-omap/include/plat/iommu.h +++ b/arch/arm/plat-omap/include/plat/iommu.h @@ -154,6 +154,8 @@ extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end); extern void flush_iotlb_all(struct iommu *obj); extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); +extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, + u32 **ppte); extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova); extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end); diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index 2910de921c52..1b911681e911 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h @@ -318,6 +318,7 @@ #define INT_34XX_PRCM_MPU_IRQ 11 #define INT_34XX_MCBSP1_IRQ 16 #define INT_34XX_MCBSP2_IRQ 17 +#define INT_34XX_GPMC_IRQ 20 #define INT_34XX_MCBSP3_IRQ 22 #define INT_34XX_MCBSP4_IRQ 23 #define INT_34XX_CAM_IRQ 24 @@ -411,7 +412,13 @@ #define TWL_IRQ_END TWL6030_IRQ_END #endif -#define NR_IRQS TWL_IRQ_END +/* GPMC related */ +#define OMAP_GPMC_IRQ_BASE (TWL_IRQ_END) +#define OMAP_GPMC_NR_IRQS 7 +#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS) + + +#define NR_IRQS OMAP_GPMC_IRQ_END #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h index 1254e4945b6f..3d51b18131cc 100644 --- a/arch/arm/plat-omap/include/plat/mcspi.h +++ b/arch/arm/plat-omap/include/plat/mcspi.h @@ -1,8 +1,19 @@ #ifndef _OMAP2_MCSPI_H #define _OMAP2_MCSPI_H +#define OMAP2_MCSPI_REV 0 +#define OMAP3_MCSPI_REV 1 +#define OMAP4_MCSPI_REV 2 + +#define OMAP4_MCSPI_REG_OFFSET 0x100 + struct omap2_mcspi_platform_config { unsigned short num_cs; + unsigned int regs_offset; +}; + +struct omap2_mcspi_dev_attr { + unsigned short num_chipselect; }; struct omap2_mcspi_device_config { diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h index 6562cd082bb1..d86d1ecf0068 100644 --- a/arch/arm/plat-omap/include/plat/nand.h +++ b/arch/arm/plat-omap/include/plat/nand.h @@ -8,8 +8,16 @@ * published by the Free Software Foundation. 
*/ +#include <plat/gpmc.h> #include <linux/mtd/partitions.h> +enum nand_io { + NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */ + NAND_OMAP_POLLED, /* polled mode, without prefetch */ + NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */ + NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */ +}; + struct omap_nand_platform_data { unsigned int options; int cs; @@ -20,8 +28,11 @@ struct omap_nand_platform_data { int (*nand_setup)(void); int (*dev_ready)(struct omap_nand_platform_data *); int dma_channel; + int gpmc_irq; + enum nand_io xfer_type; unsigned long phys_base; int devsize; + enum omap_ecc ecc_opt; }; /* minimum size for IO mapping */ diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h index affe87e9ece7..cbe897ca7f9e 100644 --- a/arch/arm/plat-omap/include/plat/onenand.h +++ b/arch/arm/plat-omap/include/plat/onenand.h @@ -15,12 +15,20 @@ #define ONENAND_SYNC_READ (1 << 0) #define ONENAND_SYNC_READWRITE (1 << 1) +struct onenand_freq_info { + u16 maf_id; + u16 dev_id; + u16 ver_id; +}; + struct omap_onenand_platform_data { int cs; int gpio_irq; struct mtd_partition *parts; int nr_parts; - int (*onenand_setup)(void __iomem *, int freq); + int (*onenand_setup)(void __iomem *, int *freq_ptr); + int (*get_freq)(const struct onenand_freq_info *freq_info, + bool *clk_dep); int dma_channel; u8 flags; u8 regulator_can_sleep; diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h index 450a332f1009..077192759afc 100644 --- a/arch/arm/plat-omap/include/plat/usb.h +++ b/arch/arm/plat-omap/include/plat/usb.h @@ -91,6 +91,10 @@ extern int omap4430_phy_exit(struct device *dev); #endif +extern void am35x_musb_reset(void); +extern void am35x_musb_phy_power(u8 on); +extern void am35x_musb_clear_irq(void); +extern void am35x_musb_set_mode(u8 musb_mode); /* * FIXME correct answer depends on hmc_mode, diff --git a/drivers/Kconfig b/drivers/Kconfig index 9bfb71ff3a6a..177c7d156933 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -117,4 +117,6 @@ source "drivers/staging/Kconfig" source "drivers/platform/Kconfig" source "drivers/clk/Kconfig" + +source "drivers/hwspinlock/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index b423bb16c3a8..3f135b6fb014 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -117,3 +117,5 @@ obj-y += platform/ obj-y += ieee802154/ #common clk code obj-y += clk/ + +obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig new file mode 100644 index 000000000000..eb4af28f8567 --- /dev/null +++ b/drivers/hwspinlock/Kconfig @@ -0,0 +1,22 @@ +# +# Generic HWSPINLOCK framework +# + +config HWSPINLOCK + tristate "Generic Hardware Spinlock framework" + help + Say y here to support the generic hardware spinlock framework. + You only need to enable this if you have hardware spinlock module + on your system (usually only relevant if your system has remote slave + coprocessors). + + If unsure, say N. + +config HWSPINLOCK_OMAP + tristate "OMAP Hardware Spinlock device" + depends on HWSPINLOCK && ARCH_OMAP4 + help + Say y here to support the OMAP Hardware Spinlock device (firstly + introduced in OMAP4). + + If unsure, say N. 
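
For orientation, the two Kconfig symbols above gate the new framework core and its OMAP back-end. A client driver sitting on top of them would look roughly like the sketch below; this is not part of the patch set — the lock id (3), the 100 msec timeout, and the function name are illustrative assumptions only.

    #include <linux/errno.h>
    #include <linux/hwspinlock.h>

    /*
     * Minimal usage sketch, assuming CONFIG_HWSPINLOCK and
     * CONFIG_HWSPINLOCK_OMAP are enabled. Lock id and timeout are
     * arbitrary example values, not mandated by this series.
     */
    static int example_touch_shared_data(void)
    {
            struct hwspinlock *hwlock;
            int ret;

            /* reserve a specific, pre-agreed lock id */
            hwlock = hwspin_lock_request_specific(3);
            if (!hwlock)
                    return -EBUSY;

            /* busy-wait for up to 100 msecs */
            ret = hwspin_lock_timeout(hwlock, 100);
            if (!ret) {
                    /* ... short, non-sleeping critical section ... */
                    hwspin_unlock(hwlock);
            }

            hwspin_lock_free(hwlock);
            return ret;
    }
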
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile new file mode 100644 index 000000000000..5729a3f7ed3d --- /dev/null +++ b/drivers/hwspinlock/Makefile @@ -0,0 +1,6 @@ +# +# Generic Hardware Spinlock framework +# + +obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o +obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c new file mode 100644 index 000000000000..43a62714b4fb --- /dev/null +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -0,0 +1,548 @@ +/* + * Hardware spinlock framework + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/jiffies.h> +#include <linux/radix-tree.h> +#include <linux/hwspinlock.h> +#include <linux/pm_runtime.h> + +#include "hwspinlock_internal.h" + +/* radix tree tags */ +#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */ + +/* + * A radix tree is used to maintain the available hwspinlock instances. + * The tree associates hwspinlock pointers with their integer key id, + * and provides easy-to-use API which makes the hwspinlock core code simple + * and easy to read. + * + * Radix trees are quick on lookups, and reasonably efficient in terms of + * storage, especially with high density usages such as this framework + * requires (a continuous range of integer keys, beginning with zero, is + * used as the ID's of the hwspinlock instances). + * + * The radix tree API supports tagging items in the tree, which this + * framework uses to mark unused hwspinlock instances (see the + * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the + * tree, looking for an unused hwspinlock instance, is now reduced to a + * single radix tree API call. + */ +static RADIX_TREE(hwspinlock_tree, GFP_KERNEL); + +/* + * Synchronization of access to the tree is achieved using this spinlock, + * as the radix-tree API requires that users provide all synchronisation. + */ +static DEFINE_SPINLOCK(hwspinlock_tree_lock); + +/** + * __hwspin_trylock() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * @mode: controls whether local interrupts are disabled or not + * @flags: a pointer where the caller's interrupt state will be saved at (if + * requested) + * + * This function attempts to lock an hwspinlock, and will immediately + * fail if the hwspinlock is already taken. + * + * Upon a successful return from this function, preemption (and possibly + * interrupts) is disabled, so the caller must not sleep, and is advised to + * release the hwspinlock as soon as possible. This is required in order to + * minimize remote cores polling on the hardware interconnect. + * + * The user decides whether local interrupts are disabled or not, and if yes, + * whether he wants their previous state to be saved. 
It is up to the user + * to choose the appropriate @mode of operation, exactly the same way users + * should decide between spin_trylock, spin_trylock_irq and + * spin_trylock_irqsave. + * + * Returns 0 if we successfully locked the hwspinlock or -EBUSY if + * the hwspinlock was already taken. + * This function will never sleep. + */ +int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + int ret; + + BUG_ON(!hwlock); + BUG_ON(!flags && mode == HWLOCK_IRQSTATE); + + /* + * This spin_lock{_irq, _irqsave} serves three purposes: + * + * 1. Disable preemption, in order to minimize the period of time + * in which the hwspinlock is taken. This is important in order + * to minimize the possible polling on the hardware interconnect + * by a remote user of this lock. + * 2. Make the hwspinlock SMP-safe (so we can take it from + * additional contexts on the local host). + * 3. Ensure that in_atomic/might_sleep checks catch potential + * problems with hwspinlock usage (e.g. scheduler checks like + * 'scheduling while atomic' etc.) + */ + if (mode == HWLOCK_IRQSTATE) + ret = spin_trylock_irqsave(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + ret = spin_trylock_irq(&hwlock->lock); + else + ret = spin_trylock(&hwlock->lock); + + /* is lock already taken by another context on the local cpu ? */ + if (!ret) + return -EBUSY; + + /* try to take the hwspinlock device */ + ret = hwlock->ops->trylock(hwlock); + + /* if hwlock is already taken, undo spin_trylock_* and exit */ + if (!ret) { + if (mode == HWLOCK_IRQSTATE) + spin_unlock_irqrestore(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + spin_unlock_irq(&hwlock->lock); + else + spin_unlock(&hwlock->lock); + + return -EBUSY; + } + + /* + * We can be sure the other core's memory operations + * are observable to us only _after_ we successfully take + * the hwspinlock, and we must make sure that subsequent memory + * operations (both reads and writes) will not be reordered before + * we actually took the hwspinlock. + * + * Note: the implicit memory barrier of the spinlock above is too + * early, so we need this additional explicit memory barrier. + */ + mb(); + + return 0; +} +EXPORT_SYMBOL_GPL(__hwspin_trylock); + +/** + * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @timeout: timeout value in msecs + * @mode: mode which controls whether local interrupts are disabled or not + * @flags: a pointer to where the caller's interrupt state will be saved at (if + * requested) + * + * This function locks the given @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up after @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption is disabled + * (and possibly local interrupts, too), so the caller must not sleep, + * and is advised to release the hwspinlock as soon as possible. + * This is required in order to minimize remote cores polling on the + * hardware interconnect. + * + * The user decides whether local interrupts are disabled or not, and if yes, + * whether he wants their previous state to be saved. It is up to the user + * to choose the appropriate @mode of operation, exactly the same way users + * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). 
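Since the @mode handling above mirrors the spin_trylock family, callers normally use one of the static inline wrappers added to the public header later in this patch rather than __hwspin_trylock() itself. A hedged caller-side sketch, where hwlock was requested earlier and shared->counter is an invented field in memory visible to a remote core:

        unsigned long flags;
        int ret;

        ret = hwspin_trylock_irqsave(hwlock, &flags);
        if (ret)
                return ret;     /* -EBUSY: held remotely or by another local context */

        shared->counter++;      /* keep the critical section as short as possible */

        hwspin_unlock_irqrestore(hwlock, &flags);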
The function will never sleep. + */ +int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, + int mode, unsigned long *flags) +{ + int ret; + unsigned long expire; + + expire = msecs_to_jiffies(to) + jiffies; + + for (;;) { + /* Try to take the hwspinlock */ + ret = __hwspin_trylock(hwlock, mode, flags); + if (ret != -EBUSY) + break; + + /* + * The lock is already taken, let's check if the user wants + * us to try again + */ + if (time_is_before_eq_jiffies(expire)) + return -ETIMEDOUT; + + /* + * Allow platform-specific relax handlers to prevent + * hogging the interconnect (no sleeping, though) + */ + if (hwlock->ops->relax) + hwlock->ops->relax(hwlock); + } + + return ret; +} +EXPORT_SYMBOL_GPL(__hwspin_lock_timeout); + +/** + * __hwspin_unlock() - unlock a specific hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * @mode: controls whether local interrupts needs to be restored or not + * @flags: previous caller's interrupt state to restore (if requested) + * + * This function will unlock a specific hwspinlock, enable preemption and + * (possibly) enable interrupts or restore their previous state. + * @hwlock must be already locked before calling this function: it is a bug + * to call unlock on a @hwlock that is already unlocked. + * + * The user decides whether local interrupts should be enabled or not, and + * if yes, whether he wants their previous state to be restored. It is up + * to the user to choose the appropriate @mode of operation, exactly the + * same way users decide between spin_unlock, spin_unlock_irq and + * spin_unlock_irqrestore. + * + * The function will never sleep. + */ +void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + BUG_ON(!hwlock); + BUG_ON(!flags && mode == HWLOCK_IRQSTATE); + + /* + * We must make sure that memory operations (both reads and writes), + * done before unlocking the hwspinlock, will not be reordered + * after the lock is released. + * + * That's the purpose of this explicit memory barrier. + * + * Note: the memory barrier induced by the spin_unlock below is too + * late; the other core is going to access memory soon after it will + * take the hwspinlock, and by then we want to be sure our memory + * operations are already observable. + */ + mb(); + + hwlock->ops->unlock(hwlock); + + /* Undo the spin_trylock{_irq, _irqsave} called while locking */ + if (mode == HWLOCK_IRQSTATE) + spin_unlock_irqrestore(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + spin_unlock_irq(&hwlock->lock); + else + spin_unlock(&hwlock->lock); +} +EXPORT_SYMBOL_GPL(__hwspin_unlock); + +/** + * hwspin_lock_register() - register a new hw spinlock + * @hwlock: hwspinlock to register. + * + * This function should be called from the underlying platform-specific + * implementation, to register a new hwspinlock instance. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context. 
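The explicit mb() before the hardware lock is released is what allows a remote core to rely on plain loads and stores to shared memory once it has taken the same lock. A minimal producer-side sketch under that contract; the ring layout, RING_SIZE and the 100 msec budget are illustrative assumptions:

        ret = hwspin_lock_timeout(hwlock, 100);         /* busy-wait up to 100 msecs */
        if (ret)
                return ret;                             /* typically -ETIMEDOUT */

        ring->slot[ring->head % RING_SIZE] = msg;       /* plain writes to shared RAM */
        ring->head++;

        hwspin_unlock(hwlock);          /* mb() inside orders the writes before the release */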
+ * + * Returns 0 on success, or an appropriate error code on failure + */ +int hwspin_lock_register(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + if (!hwlock || !hwlock->ops || + !hwlock->ops->trylock || !hwlock->ops->unlock) { + pr_err("invalid parameters\n"); + return -EINVAL; + } + + spin_lock_init(&hwlock->lock); + + spin_lock(&hwspinlock_tree_lock); + + ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock); + if (ret) + goto out; + + /* mark this hwspinlock as available */ + tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* self-sanity check which should never fail */ + WARN_ON(tmp != hwlock); + +out: + spin_unlock(&hwspinlock_tree_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hwspin_lock_register); + +/** + * hwspin_lock_unregister() - unregister an hw spinlock + * @id: index of the specific hwspinlock to unregister + * + * This function should be called from the underlying platform-specific + * implementation, to unregister an existing (and unused) hwspinlock. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context. + * + * Returns the address of hwspinlock @id on success, or NULL on failure + */ +struct hwspinlock *hwspin_lock_unregister(unsigned int id) +{ + struct hwspinlock *hwlock = NULL; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* make sure the hwspinlock is not in use (tag is set) */ + ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_err("hwspinlock %d still in use (or not present)\n", id); + goto out; + } + + hwlock = radix_tree_delete(&hwspinlock_tree, id); + if (!hwlock) { + pr_err("failed to delete hwspinlock %d\n", id); + goto out; + } + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_unregister); + +/** + * __hwspin_lock_request() - tag an hwspinlock as used and power it up + * + * This is an internal function that prepares an hwspinlock instance + * before it is given to the user. The function assumes that + * hwspinlock_tree_lock is taken. + * + * Returns 0 or positive to indicate success, and a negative value to + * indicate an error (with the appropriate error code) + */ +static int __hwspin_lock_request(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + /* prevent underlying implementation from being removed */ + if (!try_module_get(hwlock->owner)) { + dev_err(hwlock->dev, "%s: can't get owner\n", __func__); + return -EINVAL; + } + + /* notify PM core that power is now needed */ + ret = pm_runtime_get_sync(hwlock->dev); + if (ret < 0) { + dev_err(hwlock->dev, "%s: can't power on device\n", __func__); + return ret; + } + + /* mark hwspinlock as used, should not fail */ + tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* self-sanity check that should never fail */ + WARN_ON(tmp != hwlock); + + return ret; +} + +/** + * hwspin_lock_get_id() - retrieve id number of a given hwspinlock + * @hwlock: a valid hwspinlock instance + * + * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid. + */ +int hwspin_lock_get_id(struct hwspinlock *hwlock) +{ + if (!hwlock) { + pr_err("invalid hwlock\n"); + return -EINVAL; + } + + return hwlock->id; +} +EXPORT_SYMBOL_GPL(hwspin_lock_get_id); + +/** + * hwspin_lock_request() - request an hwspinlock + * + * This function should be called by users of the hwspinlock device, + * in order to dynamically assign them an unused hwspinlock. 
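Seen from an underlying implementation, registration amounts to filling in the ops, id, dev and owner fields of each lock and handing it to the core. A skeletal, hypothetical sketch (my_hwspinlock_ops, pdev and the single lock id are invented here; the OMAP driver later in this patch does the same per lock in a loop):

        struct hwspinlock *lock;
        int ret;

        lock = kzalloc(sizeof(*lock), GFP_KERNEL);
        if (!lock)
                return -ENOMEM;

        lock->dev   = &pdev->dev;               /* used by the core for runtime PM */
        lock->owner = THIS_MODULE;              /* pins the implementation module */
        lock->id    = 0;                        /* system-wide unique index */
        lock->ops   = &my_hwspinlock_ops;       /* trylock/unlock/relax callbacks */

        ret = hwspin_lock_register(lock);
        if (ret)
                kfree(lock);
        return ret;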
+ * Usually the user of this lock will then have to communicate the lock's id + * to the remote core before it can be used for synchronization (to get the + * id of a given hwlock, use hwspin_lock_get_id()). + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet). + * + * Returns the address of the assigned hwspinlock, or NULL on error + */ +struct hwspinlock *hwspin_lock_request(void) +{ + struct hwspinlock *hwlock; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* look for an unused lock */ + ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, + 0, 1, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_warn("a free hwspinlock is not available\n"); + hwlock = NULL; + goto out; + } + + /* sanity check that should never fail */ + WARN_ON(ret > 1); + + /* mark as used and power up */ + ret = __hwspin_lock_request(hwlock); + if (ret < 0) + hwlock = NULL; + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_request); + +/** + * hwspin_lock_request_specific() - request for a specific hwspinlock + * @id: index of the specific hwspinlock that is requested + * + * This function should be called by users of the hwspinlock module, + * in order to assign them a specific hwspinlock. + * Usually early board code will be calling this function in order to + * reserve specific hwspinlock ids for predefined purposes. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet). + * + * Returns the address of the assigned hwspinlock, or NULL on error + */ +struct hwspinlock *hwspin_lock_request_specific(unsigned int id) +{ + struct hwspinlock *hwlock; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* make sure this hwspinlock exists */ + hwlock = radix_tree_lookup(&hwspinlock_tree, id); + if (!hwlock) { + pr_warn("hwspinlock %u does not exist\n", id); + goto out; + } + + /* sanity check (this shouldn't happen) */ + WARN_ON(hwlock->id != id); + + /* make sure this hwspinlock is unused */ + ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_warn("hwspinlock %u is already in use\n", id); + hwlock = NULL; + goto out; + } + + /* mark as used and power up */ + ret = __hwspin_lock_request(hwlock); + if (ret < 0) + hwlock = NULL; + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); + +/** + * hwspin_lock_free() - free a specific hwspinlock + * @hwlock: the specific hwspinlock to free + * + * This function mark @hwlock as free again. + * Should only be called with an @hwlock that was retrieved from + * an earlier call to omap_hwspin_lock_request{_specific}. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet). 
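Since the id of a dynamically assigned lock is not known in advance, the usual follow-up is to fetch it with hwspin_lock_get_id() and hand it to the remote core over an existing IPC channel. A sketch of that hand-off, where ipc_send_lock_id() and remote are placeholders rather than real APIs:

        struct hwspinlock *hwlock;
        int id;

        hwlock = hwspin_lock_request();
        if (!hwlock)
                return -EBUSY;                  /* no unused hwspinlock left */

        id = hwspin_lock_get_id(hwlock);
        ipc_send_lock_id(remote, id);           /* placeholder for the IPC mechanism */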
+ * + * Returns 0 on success, or an appropriate error code on failure + */ +int hwspin_lock_free(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + if (!hwlock) { + pr_err("invalid hwlock\n"); + return -EINVAL; + } + + spin_lock(&hwspinlock_tree_lock); + + /* make sure the hwspinlock is used */ + ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + if (ret == 1) { + dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__); + dump_stack(); + ret = -EINVAL; + goto out; + } + + /* notify the underlying device that power is not needed */ + ret = pm_runtime_put(hwlock->dev); + if (ret < 0) + goto out; + + /* mark this hwspinlock as available */ + tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* sanity check (this shouldn't happen) */ + WARN_ON(tmp != hwlock); + + module_put(hwlock->owner); + +out: + spin_unlock(&hwspinlock_tree_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hwspin_lock_free); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock interface"); +MODULE_AUTHOR("Ohad Ben-Cohen <[email protected]>"); diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h new file mode 100644 index 000000000000..69935e6b93e5 --- /dev/null +++ b/drivers/hwspinlock/hwspinlock_internal.h @@ -0,0 +1,61 @@ +/* + * Hardware spinlocks internal header + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __HWSPINLOCK_HWSPINLOCK_H +#define __HWSPINLOCK_HWSPINLOCK_H + +#include <linux/spinlock.h> +#include <linux/device.h> + +/** + * struct hwspinlock_ops - platform-specific hwspinlock handlers + * + * @trylock: make a single attempt to take the lock. returns 0 on + * failure and true on success. may _not_ sleep. + * @unlock: release the lock. always succeed. may _not_ sleep. + * @relax: optional, platform-specific relax handler, called by hwspinlock + * core while spinning on a lock, between two successive + * invocations of @trylock. may _not_ sleep. + */ +struct hwspinlock_ops { + int (*trylock)(struct hwspinlock *lock); + void (*unlock)(struct hwspinlock *lock); + void (*relax)(struct hwspinlock *lock); +}; + +/** + * struct hwspinlock - this struct represents a single hwspinlock instance + * + * @dev: underlying device, will be used to invoke runtime PM api + * @ops: platform-specific hwspinlock handlers + * @id: a global, unique, system-wide, index of the lock. + * @lock: initialized and used by hwspinlock core + * @owner: underlying implementation module, used to maintain module ref count + * + * Note: currently simplicity was opted for, but later we can squeeze some + * memory bytes by grouping the dev, ops and owner members in a single + * per-platform struct, and have all hwspinlocks point at it. 
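One possible shape of the space optimization hinted at in that note, with the per-lock dev/ops/owner triplet factored into a shared per-platform descriptor, is sketched below; this is not part of the patch, only an illustration of the idea:

struct hwspinlock_device {                      /* one per platform/bank of locks */
        struct device *dev;
        const struct hwspinlock_ops *ops;
        struct module *owner;
};

struct hwspinlock {                             /* one per lock */
        struct hwspinlock_device *bank;         /* shared per-platform data */
        int id;
        spinlock_t lock;
};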
+ */ +struct hwspinlock { + struct device *dev; + const struct hwspinlock_ops *ops; + int id; + spinlock_t lock; + struct module *owner; +}; + +#endif /* __HWSPINLOCK_HWSPINLOCK_H */ diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c new file mode 100644 index 000000000000..a8f02734c026 --- /dev/null +++ b/drivers/hwspinlock/omap_hwspinlock.c @@ -0,0 +1,231 @@ +/* + * OMAP hardware spinlock driver + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Simon Que <[email protected]> + * Hari Kanigeri <[email protected]> + * Ohad Ben-Cohen <[email protected]> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/hwspinlock.h> +#include <linux/platform_device.h> + +#include "hwspinlock_internal.h" + +/* Spinlock register offsets */ +#define SYSSTATUS_OFFSET 0x0014 +#define LOCK_BASE_OFFSET 0x0800 + +#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24) + +/* Possible values of SPINLOCK_LOCK_REG */ +#define SPINLOCK_NOTTAKEN (0) /* free */ +#define SPINLOCK_TAKEN (1) /* locked */ + +#define to_omap_hwspinlock(lock) \ + container_of(lock, struct omap_hwspinlock, lock) + +struct omap_hwspinlock { + struct hwspinlock lock; + void __iomem *addr; +}; + +struct omap_hwspinlock_state { + int num_locks; /* Total number of locks in system */ + void __iomem *io_base; /* Mapped base address */ +}; + +static int omap_hwspinlock_trylock(struct hwspinlock *lock) +{ + struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock); + + /* attempt to acquire the lock by reading its value */ + return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr)); +} + +static void omap_hwspinlock_unlock(struct hwspinlock *lock) +{ + struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock); + + /* release the lock by writing 0 to it */ + writel(SPINLOCK_NOTTAKEN, omap_lock->addr); +} + +/* + * relax the OMAP interconnect while spinning on it. + * + * The specs recommended that the retry delay time will be + * just over half of the time that a requester would be + * expected to hold the lock. + * + * The number below is taken from an hardware specs example, + * obviously it is somewhat arbitrary. 
+ */ +static void omap_hwspinlock_relax(struct hwspinlock *lock) +{ + ndelay(50); +} + +static const struct hwspinlock_ops omap_hwspinlock_ops = { + .trylock = omap_hwspinlock_trylock, + .unlock = omap_hwspinlock_unlock, + .relax = omap_hwspinlock_relax, +}; + +static int __devinit omap_hwspinlock_probe(struct platform_device *pdev) +{ + struct omap_hwspinlock *omap_lock; + struct omap_hwspinlock_state *state; + struct hwspinlock *lock; + struct resource *res; + void __iomem *io_base; + int i, ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + io_base = ioremap(res->start, resource_size(res)); + if (!io_base) { + ret = -ENOMEM; + goto free_state; + } + + /* Determine number of locks */ + i = readl(io_base + SYSSTATUS_OFFSET); + i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET; + + /* one of the four lsb's must be set, and nothing else */ + if (hweight_long(i & 0xf) != 1 || i > 8) { + ret = -EINVAL; + goto iounmap_base; + } + + state->num_locks = i * 32; + state->io_base = io_base; + + platform_set_drvdata(pdev, state); + + /* + * runtime PM will make sure the clock of this module is + * enabled iff at least one lock is requested + */ + pm_runtime_enable(&pdev->dev); + + for (i = 0; i < state->num_locks; i++) { + omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL); + if (!omap_lock) { + ret = -ENOMEM; + goto free_locks; + } + + omap_lock->lock.dev = &pdev->dev; + omap_lock->lock.owner = THIS_MODULE; + omap_lock->lock.id = i; + omap_lock->lock.ops = &omap_hwspinlock_ops; + omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i; + + ret = hwspin_lock_register(&omap_lock->lock); + if (ret) { + kfree(omap_lock); + goto free_locks; + } + } + + return 0; + +free_locks: + while (--i >= 0) { + lock = hwspin_lock_unregister(i); + /* this should't happen, but let's give our best effort */ + if (!lock) { + dev_err(&pdev->dev, "%s: cleanups failed\n", __func__); + continue; + } + omap_lock = to_omap_hwspinlock(lock); + kfree(omap_lock); + } + pm_runtime_disable(&pdev->dev); +iounmap_base: + iounmap(io_base); +free_state: + kfree(state); + return ret; +} + +static int omap_hwspinlock_remove(struct platform_device *pdev) +{ + struct omap_hwspinlock_state *state = platform_get_drvdata(pdev); + struct hwspinlock *lock; + struct omap_hwspinlock *omap_lock; + int i; + + for (i = 0; i < state->num_locks; i++) { + lock = hwspin_lock_unregister(i); + /* this shouldn't happen at this point. 
if it does, at least + * don't continue with the remove */ + if (!lock) { + dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i); + return -EBUSY; + } + + omap_lock = to_omap_hwspinlock(lock); + kfree(omap_lock); + } + + pm_runtime_disable(&pdev->dev); + iounmap(state->io_base); + kfree(state); + + return 0; +} + +static struct platform_driver omap_hwspinlock_driver = { + .probe = omap_hwspinlock_probe, + .remove = omap_hwspinlock_remove, + .driver = { + .name = "omap_hwspinlock", + }, +}; + +static int __init omap_hwspinlock_init(void) +{ + return platform_driver_register(&omap_hwspinlock_driver); +} +/* board init code might need to reserve hwspinlocks for predefined purposes */ +postcore_initcall(omap_hwspinlock_init); + +static void __exit omap_hwspinlock_exit(void) +{ + platform_driver_unregister(&omap_hwspinlock_driver); +} +module_exit(omap_hwspinlock_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock driver for OMAP"); +MODULE_AUTHOR("Simon Que <[email protected]>"); +MODULE_AUTHOR("Hari Kanigeri <[email protected]>"); +MODULE_AUTHOR("Ohad Ben-Cohen <[email protected]>"); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 078fdf11af03..8c42573f42ea 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, return ret; } -static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, +static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, int vdd) { struct omap_hsmmc_host *host = @@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, return ret; } +static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, + int vdd) +{ + return 0; +} + static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { @@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, return regulator_set_mode(host->vcc, mode); } -static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, +static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { struct omap_hsmmc_host *host = @@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, return regulator_enable(host->vcc_aux); } +static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, + int vdd, int cardsleep) +{ + return 0; +} + static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) { struct regulator *reg; @@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) break; case OMAP_MMC2_DEVID: case OMAP_MMC3_DEVID: + case OMAP_MMC5_DEVID: /* Off-chip level shifting, or none */ - mmc_slot(host).set_power = omap_hsmmc_23_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; + mmc_slot(host).set_power = omap_hsmmc_235_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; break; + case OMAP_MMC4_DEVID: + mmc_slot(host).set_power = omap_hsmmc_4_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; default: pr_err("MMC%d configuration not supported!\n", host->id); return -EINVAL; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index c89592239bc7..178e2006063d 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -106,23 +106,6 @@ config MTD_NAND_OMAP2 help Support for NAND flash on Texas Instruments OMAP2 and 
OMAP3 platforms. -config MTD_NAND_OMAP_PREFETCH - bool "GPMC prefetch support for NAND Flash device" - depends on MTD_NAND_OMAP2 - default y - help - The NAND device can be accessed for Read/Write using GPMC PREFETCH engine - to improve the performance. - -config MTD_NAND_OMAP_PREFETCH_DMA - depends on MTD_NAND_OMAP_PREFETCH - bool "DMA mode" - default n - help - The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode - or in DMA interrupt mode. - Say y for DMA mode or MPU mode will be used - config MTD_NAND_IDS tristate diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 15682ec8530e..4e33972ad17a 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/mtd/mtd.h> @@ -24,6 +25,7 @@ #include <plat/nand.h> #define DRIVER_NAME "omap2-nand" +#define OMAP_NAND_TIMEOUT_MS 5000 #define NAND_Ecc_P1e (1 << 0) #define NAND_Ecc_P2e (1 << 1) @@ -96,26 +98,19 @@ static const char *part_probes[] = { "cmdlinepart", NULL }; #endif -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH -static int use_prefetch = 1; - -/* "modprobe ... use_prefetch=0" etc */ -module_param(use_prefetch, bool, 0); -MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); - -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA -static int use_dma = 1; +/* oob info generated runtime depending on ecc algorithm and layout selected */ +static struct nand_ecclayout omap_oobinfo; +/* Define some generic bad / good block scan pattern which are used + * while scanning a device for factory marked good / bad blocks + */ +static uint8_t scan_ff_pattern[] = { 0xff }; +static struct nand_bbt_descr bb_descrip_flashbased = { + .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, + .offs = 0, + .len = 1, + .pattern = scan_ff_pattern, +}; -/* "modprobe ... 
use_dma=0" etc */ -module_param(use_dma, bool, 0); -MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); -#else -static const int use_dma; -#endif -#else -const int use_prefetch; -static const int use_dma; -#endif struct omap_nand_info { struct nand_hw_control controller; @@ -129,6 +124,13 @@ struct omap_nand_info { unsigned long phys_base; struct completion comp; int dma_ch; + int gpmc_irq; + enum { + OMAP_NAND_IO_READ = 0, /* read */ + OMAP_NAND_IO_WRITE, /* write */ + } iomode; + u_char *buf; + int buf_len; }; /** @@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); if (ret) { /* PFPW engine is busy, use cpu copy method */ if (info->nand.options & NAND_BUSWIDTH_16) @@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); - uint32_t pref_count = 0, w_count = 0; + uint32_t w_count = 0; int i = 0, ret = 0; u16 *p; + unsigned long tim, limit; /* take care of subpage writes */ if (len % 2 != 0) { @@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); if (ret) { /* PFPW engine is busy, use cpu copy method */ if (info->nand.options & NAND_BUSWIDTH_16) @@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd, iowrite16(*p++, info->nand.IO_ADDR_W); } /* wait for data to flushed-out before reset the prefetch */ - do { - pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT); - } while (pref_count); + tim = 0; + limit = (loops_per_jiffy * + msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); + /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); } } -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA /* * omap_nand_dma_cb: callback on the completion of dma transfer * @lch: logical channel @@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); - uint32_t prefetch_status = 0; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; dma_addr_t dma_addr; int ret; + unsigned long tim, limit; - /* The fifo depth is 64 bytes. We have a sync at each frame and frame - * length is 64 bytes. + /* The fifo depth is 64 bytes max. + * But configure the FIFO-threahold to 32 to get a sync at each frame + * and frame length is 32 bytes. 
*/ int buf_len = len >> 6; @@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) - /* PFPW engine is busy, use cpu copy methode */ + /* PFPW engine is busy, use cpu copy method */ goto out_copy; init_completion(&info->comp); @@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); + tim = 0; + limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); - do { - prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); - } while (prefetch_status); /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); @@ -426,14 +436,6 @@ out_copy: : omap_write_buf8(mtd, (u_char *) addr, len); return 0; } -#else -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {} -static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, - unsigned int len, int is_write) -{ - return 0; -} -#endif /** * omap_read_buf_dma_pref - read data from NAND controller into buffer @@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); } +/* + * omap_nand_irq - GMPC irq handler + * @this_irq: gpmc irq number + * @dev: omap_nand_info structure pointer is passed here + */ +static irqreturn_t omap_nand_irq(int this_irq, void *dev) +{ + struct omap_nand_info *info = (struct omap_nand_info *) dev; + u32 bytes; + u32 irq_stat; + + irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS); + bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); + bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ + if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ + if (irq_stat & 0x2) + goto done; + + if (info->buf_len && (info->buf_len < bytes)) + bytes = info->buf_len; + else if (!info->buf_len) + bytes = 0; + iowrite32_rep(info->nand.IO_ADDR_W, + (u32 *)info->buf, bytes >> 2); + info->buf = info->buf + bytes; + info->buf_len -= bytes; + + } else { + ioread32_rep(info->nand.IO_ADDR_R, + (u32 *)info->buf, bytes >> 2); + info->buf = info->buf + bytes; + + if (irq_stat & 0x2) + goto done; + } + gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); + + return IRQ_HANDLED; + +done: + complete(&info->comp); + /* disable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0); + + /* clear status */ + gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); + + return IRQ_HANDLED; +} + +/* + * omap_read_buf_irq_pref - read data from NAND controller into buffer + * @mtd: MTD device structure + * @buf: buffer to store date + * @len: number of bytes to read + */ +static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len) +{ + struct omap_nand_info *info = container_of(mtd, + struct omap_nand_info, mtd); + int ret = 0; + + if (len <= mtd->oobsize) { + omap_read_buf_pref(mtd, buf, len); + return; + } + + info->iomode = OMAP_NAND_IO_READ; + info->buf = buf; + init_completion(&info->comp); + + /* configure and start prefetch transfer */ + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0); + if (ret) + /* PFPW engine is busy, use cpu copy method */ + goto 
out_copy; + + info->buf_len = len; + /* enable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, + (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); + + /* waiting for read to complete */ + wait_for_completion(&info->comp); + + /* disable and stop the PFPW engine */ + gpmc_prefetch_reset(info->gpmc_cs); + return; + +out_copy: + if (info->nand.options & NAND_BUSWIDTH_16) + omap_read_buf16(mtd, buf, len); + else + omap_read_buf8(mtd, buf, len); +} + +/* + * omap_write_buf_irq_pref - write buffer to NAND controller + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + */ +static void omap_write_buf_irq_pref(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct omap_nand_info *info = container_of(mtd, + struct omap_nand_info, mtd); + int ret = 0; + unsigned long tim, limit; + + if (len <= mtd->oobsize) { + omap_write_buf_pref(mtd, buf, len); + return; + } + + info->iomode = OMAP_NAND_IO_WRITE; + info->buf = (u_char *) buf; + init_completion(&info->comp); + + /* configure and start prefetch transfer : size=24 */ + ret = gpmc_prefetch_enable(info->gpmc_cs, + (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1); + if (ret) + /* PFPW engine is busy, use cpu copy method */ + goto out_copy; + + info->buf_len = len; + /* enable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, + (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); + + /* waiting for write to complete */ + wait_for_completion(&info->comp); + /* wait for data to flushed-out before reset the prefetch */ + tim = 0; + limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); + + /* disable and stop the PFPW engine */ + gpmc_prefetch_reset(info->gpmc_cs); + return; + +out_copy: + if (info->nand.options & NAND_BUSWIDTH_16) + omap_write_buf16(mtd, buf, len); + else + omap_write_buf8(mtd, buf, len); +} + /** * omap_verify_buf - Verify chip data against buffer * @mtd: MTD device structure @@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) return 0; } -#ifdef CONFIG_MTD_NAND_OMAP_HWECC - /** * gen_true_ecc - This function will generate true ECC value * @ecc_buf: buffer to store ecc code @@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode) gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); } -#endif - /** * omap_wait - wait until the command is done * @mtd: MTD device structure @@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_info *info; struct omap_nand_platform_data *pdata; int err; + int i, offset; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->mtd.name = dev_name(&pdev->dev); info->mtd.owner = THIS_MODULE; - info->nand.options |= pdata->devsize ? 
NAND_BUSWIDTH_16 : 0; + info->nand.options = pdata->devsize; info->nand.options |= NAND_SKIP_BBTSCAN; /* NAND write protect off */ @@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->nand.chip_delay = 50; } - if (use_prefetch) { - + switch (pdata->xfer_type) { + case NAND_OMAP_PREFETCH_POLLED: info->nand.read_buf = omap_read_buf_pref; info->nand.write_buf = omap_write_buf_pref; - if (use_dma) { - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - printk(KERN_WARNING "DMA request failed." - " Non-dma data transfer mode\n"); - } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - - info->nand.read_buf = omap_read_buf_dma_pref; - info->nand.write_buf = omap_write_buf_dma_pref; - } - } - } else { + break; + + case NAND_OMAP_POLLED: if (info->nand.options & NAND_BUSWIDTH_16) { info->nand.read_buf = omap_read_buf16; info->nand.write_buf = omap_write_buf16; @@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->nand.read_buf = omap_read_buf8; info->nand.write_buf = omap_write_buf8; } + break; + + case NAND_OMAP_PREFETCH_DMA: + err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", + omap_nand_dma_cb, &info->comp, &info->dma_ch); + if (err < 0) { + info->dma_ch = -1; + dev_err(&pdev->dev, "DMA request failed!\n"); + goto out_release_mem_region; + } else { + omap_set_dma_dest_burst_mode(info->dma_ch, + OMAP_DMA_DATA_BURST_16); + omap_set_dma_src_burst_mode(info->dma_ch, + OMAP_DMA_DATA_BURST_16); + + info->nand.read_buf = omap_read_buf_dma_pref; + info->nand.write_buf = omap_write_buf_dma_pref; + } + break; + + case NAND_OMAP_PREFETCH_IRQ: + err = request_irq(pdata->gpmc_irq, + omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); + if (err) { + dev_err(&pdev->dev, "requesting irq(%d) error:%d", + pdata->gpmc_irq, err); + goto out_release_mem_region; + } else { + info->gpmc_irq = pdata->gpmc_irq; + info->nand.read_buf = omap_read_buf_irq_pref; + info->nand.write_buf = omap_write_buf_irq_pref; + } + break; + + default: + dev_err(&pdev->dev, + "xfer_type(%d) not supported!\n", pdata->xfer_type); + err = -EINVAL; + goto out_release_mem_region; } - info->nand.verify_buf = omap_verify_buf; -#ifdef CONFIG_MTD_NAND_OMAP_HWECC - info->nand.ecc.bytes = 3; - info->nand.ecc.size = 512; - info->nand.ecc.calculate = omap_calculate_ecc; - info->nand.ecc.hwctl = omap_enable_hwecc; - info->nand.ecc.correct = omap_correct_data; - info->nand.ecc.mode = NAND_ECC_HW; + info->nand.verify_buf = omap_verify_buf; -#else - info->nand.ecc.mode = NAND_ECC_SOFT; -#endif + /* selsect the ecc type */ + if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) + info->nand.ecc.mode = NAND_ECC_SOFT; + else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || + (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { + info->nand.ecc.bytes = 3; + info->nand.ecc.size = 512; + info->nand.ecc.calculate = omap_calculate_ecc; + info->nand.ecc.hwctl = omap_enable_hwecc; + info->nand.ecc.correct = omap_correct_data; + info->nand.ecc.mode = NAND_ECC_HW; + } /* DIP switches on some boards change between 8 and 16 bit * bus widths for flash. Try the other width if the first try fails. 
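For the ROM-code ECC layout set up in the following hunk, the arithmetic works out as follows for one common geometry, assuming a 16-bit device with a 64-byte OOB area (the geometry itself is only an example):

        offset          = 2                             (16-bit bus; 1 on an 8-bit bus)
        eccbytes        = 3 * (oobsize / 16)            = 3 * (64 / 16) = 12
        eccpos[]        = offset .. offset + 11         = bytes 2 .. 13
        oobfree->offset = offset + eccbytes             = 14
        oobfree->length = oobsize - (offset + eccbytes) = 50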
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) } } + /* rom code layout */ + if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { + + if (info->nand.options & NAND_BUSWIDTH_16) + offset = 2; + else { + offset = 1; + info->nand.badblock_pattern = &bb_descrip_flashbased; + } + omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16); + for (i = 0; i < omap_oobinfo.eccbytes; i++) + omap_oobinfo.eccpos[i] = i+offset; + + omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes; + omap_oobinfo.oobfree->length = info->mtd.oobsize - + (offset + omap_oobinfo.eccbytes); + + info->nand.ecc.layout = &omap_oobinfo; + } + #ifdef CONFIG_MTD_PARTITIONS err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); if (err > 0) @@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev) mtd); platform_set_drvdata(pdev, NULL); - if (use_dma) + if (info->dma_ch != -1) omap_free_dma(info->dma_ch); + if (info->gpmc_irq) + free_irq(info->gpmc_irq, info); + /* Release NAND device, its internal structures and partitions */ nand_release(&info->mtd); iounmap(info->nand.IO_ADDR_R); @@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = { static int __init omap_nand_init(void) { - printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); + pr_info("%s driver initializing\n", DRIVER_NAME); - /* This check is required if driver is being - * loaded run time as a module - */ - if ((1 == use_dma) && (0 == use_prefetch)) { - printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 " - "without use_prefetch'. Prefetch will not be" - " used in either mode (mpu or dma)\n"); - } return platform_driver_register(&omap_nand_driver); } diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index ac31f461cc1c..ec26399e3cf2 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -63,7 +63,7 @@ struct omap2_onenand { struct completion dma_done; int dma_channel; int freq; - int (*setup)(void __iomem *base, int freq); + int (*setup)(void __iomem *base, int *freq_ptr); struct regulator *regulator; }; @@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) wait_err("controller error", state, ctrl, intr); return -EIO; } - if ((intr & intr_flags) != intr_flags) { - wait_err("timeout", state, ctrl, intr); - return -EIO; - } - return 0; + if ((intr & intr_flags) == intr_flags) + return 0; + /* Continue in wait for interrupt branch */ } if (state != FL_READING) { @@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data) /* DMA is not in use so this is all that is needed */ /* Revisit for OMAP3! 
*/ - ret = c->setup(c->onenand.base, c->freq); + ret = c->setup(c->onenand.base, &c->freq); return ret; } @@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) } if (pdata->onenand_setup != NULL) { - r = pdata->onenand_setup(c->onenand.base, c->freq); + r = pdata->onenand_setup(c->onenand.base, &c->freq); if (r < 0) { dev_err(&pdev->dev, "Onenand platform setup failed: " "%d\n", r); @@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " - "base %p\n", c->gpmc_cs, c->phys_base, - c->onenand.base); + "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base, + c->onenand.base, c->freq); c->pdev = pdev; c->mtd.name = dev_name(&pdev->dev); @@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) if ((r = onenand_scan(&c->mtd, 1)) < 0) goto err_release_regulator; - switch ((c->onenand.version_id >> 4) & 0xf) { - case 0: - c->freq = 40; - break; - case 1: - c->freq = 54; - break; - case 2: - c->freq = 66; - break; - case 3: - c->freq = 83; - break; - case 4: - c->freq = 104; - break; - } - #ifdef CONFIG_MTD_PARTITIONS r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); if (r > 0) diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index f076cc5c6fb0..36501adc125d 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -3,7 +3,7 @@ * * Copyright (C) 2005, 2006 Nokia Corporation * Author: Samuel Ortiz <[email protected]> and - * Juha Yrj�l� <[email protected]> + * Juha Yrj�l� <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,6 +33,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi.h> @@ -46,7 +47,6 @@ #define OMAP2_MCSPI_MAX_CTRL 4 #define OMAP2_MCSPI_REVISION 0x00 -#define OMAP2_MCSPI_SYSCONFIG 0x10 #define OMAP2_MCSPI_SYSSTATUS 0x14 #define OMAP2_MCSPI_IRQSTATUS 0x18 #define OMAP2_MCSPI_IRQENABLE 0x1c @@ -63,13 +63,6 @@ /* per-register bitmasks: */ -#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4) -#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2) -#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0) -#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1) - -#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0) - #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) @@ -122,13 +115,12 @@ struct omap2_mcspi { spinlock_t lock; struct list_head msg_queue; struct spi_master *master; - struct clk *ick; - struct clk *fck; /* Virtual base address of the controller */ void __iomem *base; unsigned long phys; /* SPI1 has 4 channels, while SPI2 has 2 */ struct omap2_mcspi_dma *dma_channels; + struct device *dev; }; struct omap2_mcspi_cs { @@ -144,7 +136,6 @@ struct omap2_mcspi_cs { * corresponding registers are modified. 
*/ struct omap2_mcspi_regs { - u32 sysconfig; u32 modulctrl; u32 wakeupenable; struct list_head cs; @@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, - omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); @@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) } static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) { - clk_disable(mcspi->ick); - clk_disable(mcspi->fck); + pm_runtime_put_sync(mcspi->dev); } static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) { - if (clk_enable(mcspi->ick)) - return -ENODEV; - if (clk_enable(mcspi->fck)) - return -ENODEV; - - omap2_mcspi_restore_ctx(mcspi); - - return 0; + return pm_runtime_get_sync(mcspi->dev); } static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) @@ -819,8 +799,9 @@ static int omap2_mcspi_setup(struct spi_device *spi) return ret; } - if (omap2_mcspi_enable_clocks(mcspi)) - return -ENODEV; + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; ret = omap2_mcspi_setup_transfer(spi, NULL); omap2_mcspi_disable_clocks(mcspi); @@ -863,10 +844,11 @@ static void omap2_mcspi_work(struct work_struct *work) struct omap2_mcspi *mcspi; mcspi = container_of(work, struct omap2_mcspi, work); - spin_lock_irq(&mcspi->lock); - if (omap2_mcspi_enable_clocks(mcspi)) - goto out; + if (omap2_mcspi_enable_clocks(mcspi) < 0) + return; + + spin_lock_irq(&mcspi->lock); /* We only enable one channel at a time -- the one whose message is * at the head of the queue -- although this controller would gladly @@ -979,10 +961,9 @@ static void omap2_mcspi_work(struct work_struct *work) spin_lock_irq(&mcspi->lock); } - omap2_mcspi_disable_clocks(mcspi); - -out: spin_unlock_irq(&mcspi->lock); + + omap2_mcspi_disable_clocks(mcspi); } static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) @@ -1058,25 +1039,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) return 0; } -static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) +static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) { struct spi_master *master = mcspi->master; u32 tmp; + int ret = 0; - if (omap2_mcspi_enable_clocks(mcspi)) - return -1; - - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, - OMAP2_MCSPI_SYSCONFIG_SOFTRESET); - do { - tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); - } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); - - tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | - OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | - OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp); - omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); @@ -1087,91 +1058,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) return 0; } -static u8 __initdata spi1_rxdma_id [] = { - OMAP24XX_DMA_SPI1_RX0, - OMAP24XX_DMA_SPI1_RX1, - OMAP24XX_DMA_SPI1_RX2, - OMAP24XX_DMA_SPI1_RX3, -}; - -static u8 __initdata spi1_txdma_id [] = { - OMAP24XX_DMA_SPI1_TX0, - OMAP24XX_DMA_SPI1_TX1, - OMAP24XX_DMA_SPI1_TX2, - OMAP24XX_DMA_SPI1_TX3, -}; - -static u8 __initdata spi2_rxdma_id[] = { - 
OMAP24XX_DMA_SPI2_RX0, - OMAP24XX_DMA_SPI2_RX1, -}; - -static u8 __initdata spi2_txdma_id[] = { - OMAP24XX_DMA_SPI2_TX0, - OMAP24XX_DMA_SPI2_TX1, -}; - -#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ - || defined(CONFIG_ARCH_OMAP4) -static u8 __initdata spi3_rxdma_id[] = { - OMAP24XX_DMA_SPI3_RX0, - OMAP24XX_DMA_SPI3_RX1, -}; +static int omap_mcspi_runtime_resume(struct device *dev) +{ + struct omap2_mcspi *mcspi; + struct spi_master *master; -static u8 __initdata spi3_txdma_id[] = { - OMAP24XX_DMA_SPI3_TX0, - OMAP24XX_DMA_SPI3_TX1, -}; -#endif + master = dev_get_drvdata(dev); + mcspi = spi_master_get_devdata(master); + omap2_mcspi_restore_ctx(mcspi); -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) -static u8 __initdata spi4_rxdma_id[] = { - OMAP34XX_DMA_SPI4_RX0, -}; + return 0; +} -static u8 __initdata spi4_txdma_id[] = { - OMAP34XX_DMA_SPI4_TX0, -}; -#endif static int __init omap2_mcspi_probe(struct platform_device *pdev) { struct spi_master *master; + struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; struct omap2_mcspi *mcspi; struct resource *r; int status = 0, i; - const u8 *rxdma_id, *txdma_id; - unsigned num_chipselect; - - switch (pdev->id) { - case 1: - rxdma_id = spi1_rxdma_id; - txdma_id = spi1_txdma_id; - num_chipselect = 4; - break; - case 2: - rxdma_id = spi2_rxdma_id; - txdma_id = spi2_txdma_id; - num_chipselect = 2; - break; -#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ - || defined(CONFIG_ARCH_OMAP4) - case 3: - rxdma_id = spi3_rxdma_id; - txdma_id = spi3_txdma_id; - num_chipselect = 2; - break; -#endif -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) - case 4: - rxdma_id = spi4_rxdma_id; - txdma_id = spi4_txdma_id; - num_chipselect = 1; - break; -#endif - default: - return -EINVAL; - } master = spi_alloc_master(&pdev->dev, sizeof *mcspi); if (master == NULL) { @@ -1188,7 +1094,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) master->setup = omap2_mcspi_setup; master->transfer = omap2_mcspi_transfer; master->cleanup = omap2_mcspi_cleanup; - master->num_chipselect = num_chipselect; + master->num_chipselect = pdata->num_cs; dev_set_drvdata(&pdev->dev, master); @@ -1206,49 +1112,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) goto err1; } + r->start += pdata->regs_offset; + r->end += pdata->regs_offset; mcspi->phys = r->start; mcspi->base = ioremap(r->start, r->end - r->start + 1); if (!mcspi->base) { dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); status = -ENOMEM; - goto err1aa; + goto err2; } + mcspi->dev = &pdev->dev; INIT_WORK(&mcspi->work, omap2_mcspi_work); spin_lock_init(&mcspi->lock); INIT_LIST_HEAD(&mcspi->msg_queue); INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); - mcspi->ick = clk_get(&pdev->dev, "ick"); - if (IS_ERR(mcspi->ick)) { - dev_dbg(&pdev->dev, "can't get mcspi_ick\n"); - status = PTR_ERR(mcspi->ick); - goto err1a; - } - mcspi->fck = clk_get(&pdev->dev, "fck"); - if (IS_ERR(mcspi->fck)) { - dev_dbg(&pdev->dev, "can't get mcspi_fck\n"); - status = PTR_ERR(mcspi->fck); - goto err2; - } - mcspi->dma_channels = kcalloc(master->num_chipselect, sizeof(struct omap2_mcspi_dma), GFP_KERNEL); if (mcspi->dma_channels == NULL) - goto err3; + goto err2; + + for (i = 0; i < master->num_chipselect; i++) { + char dma_ch_name[14]; + struct resource *dma_res; + + sprintf(dma_ch_name, "rx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA RX 
channel\n"); + status = -ENODEV; + break; + } - for (i = 0; i < num_chipselect; i++) { mcspi->dma_channels[i].dma_rx_channel = -1; - mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i]; + mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; + sprintf(dma_ch_name, "tx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); + status = -ENODEV; + break; + } + mcspi->dma_channels[i].dma_tx_channel = -1; - mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i]; + mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } - if (omap2_mcspi_reset(mcspi) < 0) - goto err4; + pm_runtime_enable(&pdev->dev); + + if (status || omap2_mcspi_master_setup(mcspi) < 0) + goto err3; status = spi_register_master(master); if (status < 0) @@ -1257,17 +1176,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) return status; err4: - kfree(mcspi->dma_channels); + spi_master_put(master); err3: - clk_put(mcspi->fck); + kfree(mcspi->dma_channels); err2: - clk_put(mcspi->ick); -err1a: - iounmap(mcspi->base); -err1aa: release_mem_region(r->start, (r->end - r->start) + 1); + iounmap(mcspi->base); err1: - spi_master_put(master); return status; } @@ -1283,9 +1198,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) mcspi = spi_master_get_devdata(master); dma_channels = mcspi->dma_channels; - clk_put(mcspi->fck); - clk_put(mcspi->ick); - + omap2_mcspi_disable_clocks(mcspi); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, (r->end - r->start) + 1); @@ -1336,6 +1249,7 @@ static int omap2_mcspi_resume(struct device *dev) static const struct dev_pm_ops omap2_mcspi_pm_ops = { .resume = omap2_mcspi_resume, + .runtime_resume = omap_mcspi_runtime_resume, }; static struct platform_driver omap2_mcspi_driver = { diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h new file mode 100644 index 000000000000..8390efc457eb --- /dev/null +++ b/include/linux/hwspinlock.h @@ -0,0 +1,292 @@ +/* + * Hardware spinlock public header + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_HWSPINLOCK_H +#define __LINUX_HWSPINLOCK_H + +#include <linux/err.h> +#include <linux/sched.h> + +/* hwspinlock mode argument */ +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ + +struct hwspinlock; + +#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE) + +int hwspin_lock_register(struct hwspinlock *lock); +struct hwspinlock *hwspin_lock_unregister(unsigned int id); +struct hwspinlock *hwspin_lock_request(void); +struct hwspinlock *hwspin_lock_request_specific(unsigned int id); +int hwspin_lock_free(struct hwspinlock *hwlock); +int hwspin_lock_get_id(struct hwspinlock *hwlock); +int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, + unsigned long *); +int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); +void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); + +#else /* !CONFIG_HWSPINLOCK */ + +/* + * We don't want these functions to fail if CONFIG_HWSPINLOCK is not + * enabled. We prefer to silently succeed in this case, and let the + * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not + * required on a given setup, users will still work. + * + * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which + * we _do_ want users to fail (no point in registering hwspinlock instances if + * the framework is not available). + * + * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking + * users. Others, which care, can still check this with IS_ERR. + */ +static inline struct hwspinlock *hwspin_lock_request(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id) +{ + return ERR_PTR(-ENODEV); +} + +static inline int hwspin_lock_free(struct hwspinlock *hwlock) +{ + return 0; +} + +static inline +int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, + int mode, unsigned long *flags) +{ + return 0; +} + +static inline +int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + return 0; +} + +static inline +void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + return 0; +} + +static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) +{ + return 0; +} + +static inline int hwspin_lock_register(struct hwspinlock *hwlock) +{ + return -ENODEV; +} + +static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id) +{ + return NULL; +} + +#endif /* !CONFIG_HWSPINLOCK */ + +/** + * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts + * @hwlock: an hwspinlock which we want to trylock + * @flags: a pointer to where the caller's interrupt state will be saved at + * + * This function attempts to lock the underlying hwspinlock, and will + * immediately fail if the hwspinlock is already locked. + * + * Upon a successful return from this function, preemption and local + * interrupts are disabled (previous interrupts state is saved at @flags), + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. 
+ */
+static inline
+int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
+{
+	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock the underlying hwspinlock, and will
+ * immediately fail if the hwspinlock is already locked.
+ *
+ * Upon a successful return from this function, preemption and local
+ * interrupts are disabled, so the caller must not sleep, and is advised
+ * to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
+{
+	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible. This is required in order to minimize remote cores
+ * polling on the hardware interconnect.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock(struct hwspinlock *hwlock)
+{
+	return __hwspin_trylock(hwlock, 0, NULL);
+}
+
+/**
+ * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ * @flags: a pointer to where the caller's interrupt state will be saved
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled (plus previous interrupt state is saved), so the caller must
+ * not sleep, and is advised to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
+				unsigned int to, unsigned long *flags)
+{
+	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled so the caller must not sleep, and is advised to release the
+ * hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
+{
+	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
+{
+	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
+}
+
+/**
+ * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @flags: the caller's previously saved interrupt state to restore
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * restore the previous state of the local interrupts. It should be used
+ * to undo, e.g., hwspin_trylock_irqsave().
+ *
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ */
+static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
+					unsigned long *flags)
+{
+	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * enable local interrupts. Should be used to undo, e.g.,
+ * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
+ * calling this function: it is a bug to call unlock on a @hwlock that is
+ * already unlocked.
+ */
+static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
+{
+	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_unlock() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock and re-enable
+ * preemption.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock(struct hwspinlock *hwlock)
+{
+	__hwspin_unlock(hwlock, 0, NULL);
+}
+
+#endif /* __LINUX_HWSPINLOCK_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index cd6f3b431195..d60130f88eed 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -168,6 +168,7 @@
 #define ONENAND_SYS_CFG1_INT		(1 << 6)
 #define ONENAND_SYS_CFG1_IOBE		(1 << 5)
 #define ONENAND_SYS_CFG1_RDY_CONF	(1 << 4)
+#define ONENAND_SYS_CFG1_VHF		(1 << 3)
 #define ONENAND_SYS_CFG1_HF		(1 << 2)
 #define ONENAND_SYS_CFG1_SYNC_WRITE	(1 << 1)
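The hwspinlock.h kernel-doc above specifies the locking API but does not show it
in context. Below is a minimal usage sketch, not part of the patch itself: a
platform-independent driver guarding a structure shared with a remote core. The
ipc_shm structure, the ipc_hwlock variable and the 100 msec timeout are made-up
names chosen for illustration only.

  #include <linux/err.h>
  #include <linux/hwspinlock.h>
  #include <linux/types.h>

  /* hypothetical layout of the memory shared with the remote core */
  struct ipc_shm {
  	u32 msg;
  };

  static struct hwspinlock *ipc_hwlock;

  static int ipc_shm_init(void)
  {
  	/* dynamically assign one of the unused hwspinlocks */
  	ipc_hwlock = hwspin_lock_request();

  	/*
  	 * When CONFIG_HWSPINLOCK is disabled the stub returns
  	 * ERR_PTR(-ENODEV), so a driver that strictly requires the lock
  	 * should test with IS_ERR_OR_NULL() and not only for NULL.
  	 */
  	if (IS_ERR_OR_NULL(ipc_hwlock))
  		return -ENODEV;

  	/*
  	 * The id returned by hwspin_lock_get_id() still has to be
  	 * communicated to the remote core by some platform-specific
  	 * means (e.g. an IPC mailbox) before the lock is useful.
  	 */
  	return 0;
  }

  static int ipc_shm_post(struct ipc_shm *shm, u32 msg)
  {
  	unsigned long flags;
  	int ret;

  	/*
  	 * Bounded busy-wait: give up after 100 msecs. On success,
  	 * preemption and local interrupts stay disabled until the
  	 * matching unlock, so keep the critical section short.
  	 */
  	ret = hwspin_lock_timeout_irqsave(ipc_hwlock, 100, &flags);
  	if (ret)
  		return ret;	/* most likely -ETIMEDOUT */

  	shm->msg = msg;		/* touch the shared state */

  	hwspin_unlock_irqrestore(ipc_hwlock, &flags);
  	return 0;
  }

  static void ipc_shm_exit(void)
  {
  	/* return the lock to the pool of unused hwspinlocks */
  	hwspin_lock_free(ipc_hwlock);
  }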
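For callers that cannot afford to busy-wait at all, for example an opportunistic
update attempted from atomic context, the trylock variants fail immediately
instead of polling the interconnect. A short sketch under the same made-up
ipc_shm/ipc_hwlock assumptions as above:

  /* opportunistic update: back off at once if the remote core holds the lock */
  static bool ipc_shm_try_post(struct ipc_shm *shm, u32 msg)
  {
  	unsigned long flags;

  	/* returns -EBUSY (without spinning) when the lock is already taken */
  	if (hwspin_trylock_irqsave(ipc_hwlock, &flags))
  		return false;	/* let the caller retry later */

  	shm->msg = msg;
  	hwspin_unlock_irqrestore(ipc_hwlock, &flags);
  	return true;
  }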