 drivers/mtd/spi-nor/core.c  | 337 ++++++++++++++++++++++++++++++++++++++++--
 include/linux/mtd/spi-nor.h |  13 ++
 2 files changed, 334 insertions(+), 16 deletions(-)
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 75ec7a3d0b43..9e6a0730cdb8 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -589,6 +589,65 @@ int spi_nor_sr_ready(struct spi_nor *nor)
 }
 
 /**
+ * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
+ * @nor:        pointer to 'struct spi_nor'.
+ *
+ * Return: true if parallel locking is enabled, false otherwise.
+ */
+static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
+{
+        return nor->flags & SNOR_F_RWW;
+}
+
+/* Locking helpers for status read operations */
+static int spi_nor_rww_start_rdst(struct spi_nor *nor)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        int ret = -EAGAIN;
+
+        mutex_lock(&nor->lock);
+
+        if (rww->ongoing_io || rww->ongoing_rd)
+                goto busy;
+
+        rww->ongoing_io = true;
+        rww->ongoing_rd = true;
+        ret = 0;
+
+busy:
+        mutex_unlock(&nor->lock);
+        return ret;
+}
+
+static void spi_nor_rww_end_rdst(struct spi_nor *nor)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+
+        mutex_lock(&nor->lock);
+
+        rww->ongoing_io = false;
+        rww->ongoing_rd = false;
+
+        mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_lock_rdst(struct spi_nor *nor)
+{
+        if (spi_nor_use_parallel_locking(nor))
+                return spi_nor_rww_start_rdst(nor);
+
+        return 0;
+}
+
+static void spi_nor_unlock_rdst(struct spi_nor *nor)
+{
+        if (spi_nor_use_parallel_locking(nor)) {
+                spi_nor_rww_end_rdst(nor);
+                wake_up(&nor->rww.wait);
+        }
+}
+
+/**
  * spi_nor_ready() - Query the flash to see if it is ready for new commands.
  * @nor:        pointer to 'struct spi_nor'.
  *
@@ -596,11 +655,21 @@ int spi_nor_sr_ready(struct spi_nor *nor)
  */
 static int spi_nor_ready(struct spi_nor *nor)
 {
+        int ret;
+
+        ret = spi_nor_lock_rdst(nor);
+        if (ret)
+                return 0;
+
         /* Flashes might override the standard routine. */
         if (nor->params->ready)
-                return nor->params->ready(nor);
+                ret = nor->params->ready(nor);
+        else
+                ret = spi_nor_sr_ready(nor);
 
-        return spi_nor_sr_ready(nor);
+        spi_nor_unlock_rdst(nor);
+
+        return ret;
 }
 
 /**
@@ -1086,7 +1155,88 @@ static void spi_nor_unprep(struct spi_nor *nor)
                 nor->controller_ops->unprepare(nor);
 }
 
+static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
+                                    u8 *first, u8 *last)
+{
+        /* This is currently safe, the number of banks being very small */
+        *first = DIV_ROUND_DOWN_ULL(start, bank_size);
+        *last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
+}
+
 /* Generic helpers for internal locking and serialization */
+static bool spi_nor_rww_start_io(struct spi_nor *nor)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        bool start = false;
+
+        mutex_lock(&nor->lock);
+
+        if (rww->ongoing_io)
+                goto busy;
+
+        rww->ongoing_io = true;
+        start = true;
+
+busy:
+        mutex_unlock(&nor->lock);
+        return start;
+}
+
+static void spi_nor_rww_end_io(struct spi_nor *nor)
+{
+        mutex_lock(&nor->lock);
+        nor->rww.ongoing_io = false;
+        mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_lock_device(struct spi_nor *nor)
+{
+        if (!spi_nor_use_parallel_locking(nor))
+                return 0;
+
+        return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
+}
+
+static void spi_nor_unlock_device(struct spi_nor *nor)
+{
+        if (spi_nor_use_parallel_locking(nor)) {
+                spi_nor_rww_end_io(nor);
+                wake_up(&nor->rww.wait);
+        }
+}
+
+/* Generic helpers for internal locking and serialization */
+static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        bool start = false;
+
+        mutex_lock(&nor->lock);
+
+        if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
+                goto busy;
+
+        rww->ongoing_io = true;
+        rww->ongoing_rd = true;
+        rww->ongoing_pe = true;
+        start = true;
+
+busy:
+        mutex_unlock(&nor->lock);
+        return start;
+}
+
+static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+
+        mutex_lock(&nor->lock);
+        rww->ongoing_io = false;
+        rww->ongoing_rd = false;
+        rww->ongoing_pe = false;
+        mutex_unlock(&nor->lock);
+}
+
 int spi_nor_prep_and_lock(struct spi_nor *nor)
 {
         int ret;
@@ -1095,19 +1245,75 @@ int spi_nor_prep_and_lock(struct spi_nor *nor)
         if (ret)
                 return ret;
 
-        mutex_lock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor))
+                mutex_lock(&nor->lock);
+        else
+                ret = wait_event_killable(nor->rww.wait,
+                                          spi_nor_rww_start_exclusive(nor));
 
-        return 0;
+        return ret;
 }
 
 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
 {
-        mutex_unlock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor)) {
+                mutex_unlock(&nor->lock);
+        } else {
+                spi_nor_rww_end_exclusive(nor);
+                wake_up(&nor->rww.wait);
+        }
 
         spi_nor_unprep(nor);
 }
 
 /* Internal locking helpers for program and erase operations */
+static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        unsigned int used_banks = 0;
+        bool started = false;
+        u8 first, last;
+        int bank;
+
+        mutex_lock(&nor->lock);
+
+        if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
+                goto busy;
+
+        spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+        for (bank = first; bank <= last; bank++) {
+                if (rww->used_banks & BIT(bank))
+                        goto busy;
+
+                used_banks |= BIT(bank);
+        }
+
+        rww->used_banks |= used_banks;
+        rww->ongoing_pe = true;
+        started = true;
+
+busy:
+        mutex_unlock(&nor->lock);
+        return started;
+}
+
+static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        u8 first, last;
+        int bank;
+
+        mutex_lock(&nor->lock);
+
+        spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+        for (bank = first; bank <= last; bank++)
+                rww->used_banks &= ~BIT(bank);
+
+        rww->ongoing_pe = false;
+
+        mutex_unlock(&nor->lock);
+}
+
 static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
 {
         int ret;
@@ -1116,19 +1322,77 @@ static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t le
         if (ret)
                 return ret;
 
-        mutex_lock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor))
+                mutex_lock(&nor->lock);
+        else
+                ret = wait_event_killable(nor->rww.wait,
+                                          spi_nor_rww_start_pe(nor, start, len));
 
-        return 0;
+        return ret;
 }
 
 static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
 {
-        mutex_unlock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor)) {
+                mutex_unlock(&nor->lock);
+        } else {
+                spi_nor_rww_end_pe(nor, start, len);
+                wake_up(&nor->rww.wait);
+        }
 
         spi_nor_unprep(nor);
 }
 
 /* Internal locking helpers for read operations */
+static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        unsigned int used_banks = 0;
+        bool started = false;
+        u8 first, last;
+        int bank;
+
+        mutex_lock(&nor->lock);
+
+        if (rww->ongoing_io || rww->ongoing_rd)
+                goto busy;
+
+        spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+        for (bank = first; bank <= last; bank++) {
+                if (rww->used_banks & BIT(bank))
+                        goto busy;
+
+                used_banks |= BIT(bank);
+        }
+
+        rww->used_banks |= used_banks;
+        rww->ongoing_io = true;
+        rww->ongoing_rd = true;
+        started = true;
+
+busy:
+        mutex_unlock(&nor->lock);
+        return started;
+}
+
+static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+        struct spi_nor_rww *rww = &nor->rww;
+        u8 first, last;
+        int bank;
+
+        mutex_lock(&nor->lock);
+
+        spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+        for (bank = first; bank <= last; bank++)
+                nor->rww.used_banks &= ~BIT(bank);
+
+        rww->ongoing_io = false;
+        rww->ongoing_rd = false;
+
+        mutex_unlock(&nor->lock);
+}
+
 static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
 {
         int ret;
@@ -1137,14 +1401,23 @@ static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t le
         if (ret)
                 return ret;
 
-        mutex_lock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor))
+                mutex_lock(&nor->lock);
+        else
+                ret = wait_event_killable(nor->rww.wait,
+                                          spi_nor_rww_start_rd(nor, start, len));
 
-        return 0;
+        return ret;
 }
 
 static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
 {
-        mutex_unlock(&nor->lock);
+        if (!spi_nor_use_parallel_locking(nor)) {
+                mutex_unlock(&nor->lock);
+        } else {
+                spi_nor_rww_end_rd(nor, start, len);
+                wake_up(&nor->rww.wait);
+        }
 
         spi_nor_unprep(nor);
 }
 
@@ -1453,11 +1726,18 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
                 dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
                          cmd->size, cmd->opcode, cmd->count);
 
-                ret = spi_nor_write_enable(nor);
+                ret = spi_nor_lock_device(nor);
                 if (ret)
                         goto destroy_erase_cmd_list;
 
+                ret = spi_nor_write_enable(nor);
+                if (ret) {
+                        spi_nor_unlock_device(nor);
+                        goto destroy_erase_cmd_list;
+                }
+
                 ret = spi_nor_erase_sector(nor, addr);
+                spi_nor_unlock_device(nor);
                 if (ret)
                         goto destroy_erase_cmd_list;
 
@@ -1510,11 +1790,18 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
         if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
                 unsigned long timeout;
 
-                ret = spi_nor_write_enable(nor);
+                ret = spi_nor_lock_device(nor);
                 if (ret)
                         goto erase_err;
 
+                ret = spi_nor_write_enable(nor);
+                if (ret) {
+                        spi_nor_unlock_device(nor);
+                        goto erase_err;
+                }
+
                 ret = spi_nor_erase_chip(nor);
+                spi_nor_unlock_device(nor);
                 if (ret)
                         goto erase_err;
 
@@ -1539,11 +1826,18 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
         /* "sector"-at-a-time erase */
         } else if (spi_nor_has_uniform_erase(nor)) {
                 while (len) {
-                        ret = spi_nor_write_enable(nor);
+                        ret = spi_nor_lock_device(nor);
                         if (ret)
                                 goto erase_err;
 
+                        ret = spi_nor_write_enable(nor);
+                        if (ret) {
+                                spi_nor_unlock_device(nor);
+                                goto erase_err;
+                        }
+
                         ret = spi_nor_erase_sector(nor, addr);
+                        spi_nor_unlock_device(nor);
                         if (ret)
                                 goto erase_err;
 
@@ -1836,11 +2130,18 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
 
                 addr = spi_nor_convert_addr(nor, addr);
 
-                ret = spi_nor_write_enable(nor);
+                ret = spi_nor_lock_device(nor);
                 if (ret)
                         goto write_err;
 
+                ret = spi_nor_write_enable(nor);
+                if (ret) {
+                        spi_nor_unlock_device(nor);
+                        goto write_err;
+                }
+
                 ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+                spi_nor_unlock_device(nor);
                 if (ret < 0)
                         goto write_err;
                 written = ret;
 
@@ -2531,7 +2832,8 @@ static void spi_nor_init_flags(struct spi_nor *nor)
         if (flags & NO_CHIP_ERASE)
                 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
 
-        if (flags & SPI_NOR_RWW)
+        if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
+            !nor->controller_ops)
                 nor->flags |= SNOR_F_RWW;
 }
 
@@ -3128,6 +3430,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
         if (ret)
                 return ret;
 
+        if (spi_nor_use_parallel_locking(nor))
+                init_waitqueue_head(&nor->rww.wait);
+
         /*
          * Configure the SPI memory:
          * - select op codes for (Fast) Read, Page Program and Sector Erase.
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index a3f8cdca90c8..82547b4b3708 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -343,6 +343,12 @@ struct spi_nor_flash_parameter;
  * struct spi_nor - Structure for defining the SPI NOR layer
  * @mtd:                an mtd_info structure
  * @lock:               the lock for the read/write/erase/lock/unlock operations
+ * @rww:                Read-While-Write (RWW) sync lock
+ * @rww.wait:           wait queue for the RWW sync
+ * @rww.ongoing_io:     the bus is busy
+ * @rww.ongoing_rd:     a read is ongoing on the chip
+ * @rww.ongoing_pe:     a program/erase is ongoing on the chip
+ * @rww.used_banks:     bitmap of the banks in use
  * @dev:                pointer to an SPI device or an SPI NOR controller device
  * @spimem:             pointer to the SPI memory device
  * @bouncebuf:          bounce buffer used when the buffer passed by the MTD
@@ -376,6 +382,13 @@ struct spi_nor_flash_parameter;
 struct spi_nor {
         struct mtd_info         mtd;
         struct mutex            lock;
+        struct spi_nor_rww {
+                wait_queue_head_t wait;
+                bool ongoing_io;
+                bool ongoing_rd;
+                bool ongoing_pe;
+                unsigned int used_banks;
+        } rww;
         struct device           *dev;
         struct spi_mem          *spimem;
         u8                      *bouncebuf;
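
Two ideas in the patch are worth tracing. First, spi_nor_offset_to_banks() maps a byte range to the banks it touches: with a 4 MiB bank size, a 0x2000-byte write at offset 0x3FF000 ends at 0x400FFF and therefore spans banks 0 and 1, so both banks must be free before it may start. Second, wait_event_killable() re-evaluates its condition every time wake_up() is called on rww.wait, so a blocked operation simply retries its try-lock helper until it succeeds. The stand-alone user-space program below is an illustrative sketch only, not part of the patch: it models the spi_nor_rww_start_pe()/spi_nor_rww_end_pe() claim-or-wait pattern with pthreads, a condition variable standing in for the kernel's wait queue. All names in it (banks_lock(), banks_try_claim(), BANK_SIZE, and so on) are invented for this example.

/*
 * User-space model of the RWW bank locking above, for illustration only.
 * Assumptions: 4 MiB banks, pthreads instead of the kernel's nor->lock
 * mutex; a condition variable models wait_event_killable() + wake_up().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BANK_SIZE (4 * 1024 * 1024ULL)  /* models nor->params->bank_size */
#define BIT(n)    (1U << (n))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* models nor->lock */
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;  /* models rww.wait */
static unsigned int used_banks;                           /* models rww.used_banks */

/* Same math as spi_nor_offset_to_banks(): first/last bank a range touches */
static void offset_to_banks(uint64_t start, size_t len, uint8_t *first, uint8_t *last)
{
        *first = start / BANK_SIZE;
        *last = (start + len - 1) / BANK_SIZE;
}

/* Models spi_nor_rww_start_pe(): claim all banks in the range, or none */
static bool banks_try_claim(uint64_t start, size_t len)
{
        unsigned int claim = 0;
        uint8_t first, last;
        int bank;

        offset_to_banks(start, len, &first, &last);
        for (bank = first; bank <= last; bank++) {
                if (used_banks & BIT(bank))
                        return false;   /* overlap: caller must wait and retry */
                claim |= BIT(bank);
        }
        used_banks |= claim;
        return true;
}

/* Models wait_event_killable(rww.wait, spi_nor_rww_start_pe(...)) */
static void banks_lock(uint64_t start, size_t len)
{
        pthread_mutex_lock(&lock);
        while (!banks_try_claim(start, len))
                pthread_cond_wait(&wait_q, &lock);
        pthread_mutex_unlock(&lock);
}

/* Models spi_nor_rww_end_pe() followed by wake_up() */
static void banks_unlock(uint64_t start, size_t len)
{
        uint8_t first, last;
        int bank;

        pthread_mutex_lock(&lock);
        offset_to_banks(start, len, &first, &last);
        for (bank = first; bank <= last; bank++)
                used_banks &= ~BIT(bank);
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&wait_q);        /* let waiters retry their claim */
}

int main(void)
{
        /* A write at 0x3FF000..0x400FFF crosses the bank 0/1 boundary */
        banks_lock(0x3FF000, 0x2000);
        printf("used_banks = 0x%x\n", used_banks);      /* prints 0x3 */
        banks_unlock(0x3FF000, 0x2000);
        printf("used_banks = 0x%x\n", used_banks);      /* prints 0x0 */
        return 0;
}

The design point the model preserves is that a claim is all-or-nothing: under the single state mutex, an operation either marks every bank its range touches in used_banks or marks none and sleeps, so reads and program/erase operations proceed in parallel only when their bank sets are disjoint.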