Diffstat (limited to 'drivers/staging')
202 files changed, 14466 insertions, 11069 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 0a993c47273e..3bd80f9695ac 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -42,8 +42,6 @@ source "drivers/staging/rts5208/Kconfig" source "drivers/staging/octeon/Kconfig" -source "drivers/staging/octeon-usb/Kconfig" - source "drivers/staging/vt6655/Kconfig" source "drivers/staging/vt6656/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 2800ab9b2d1d..1d9ae39fea14 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += r8188eu/ obj-$(CONFIG_RTS5208) += rts5208/ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ -obj-$(CONFIG_OCTEON_USB) += octeon-usb/ obj-$(CONFIG_VT6655) += vt6655/ obj-$(CONFIG_VT6656) += vt6656/ obj-$(CONFIG_VME_BUS) += vme_user/ diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c index 6fd549a424d5..b8d55aa8c5c7 100644 --- a/drivers/staging/fbtft/fb_ssd1351.c +++ b/drivers/staging/fbtft/fb_ssd1351.c @@ -196,8 +196,7 @@ static int update_onboard_backlight(struct backlight_device *bd) "%s: power=%d, fb_blank=%d\n", __func__, bd->props.power, bd->props.fb_blank); - on = (bd->props.power == FB_BLANK_UNBLANK) && - (bd->props.fb_blank == FB_BLANK_UNBLANK); + on = !backlight_is_blank(bd); /* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */ write_reg(par, 0xB5, on ? 0x03 : 0x02); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 60b2278d8b16..afaba94d1d1c 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -137,8 +137,7 @@ static int fbtft_backlight_update_status(struct backlight_device *bd) "%s: polarity=%d, power=%d, fb_blank=%d\n", __func__, polarity, bd->props.power, bd->props.fb_blank); - if ((bd->props.power == FB_BLANK_UNBLANK) && - (bd->props.fb_blank == FB_BLANK_UNBLANK)) + if (!backlight_is_blank(bd)) gpiod_set_value(par->gpio.led[0], polarity); else gpiod_set_value(par->gpio.led[0], !polarity); @@ -655,7 +654,6 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, fbdefio->delay = HZ / fps; fbdefio->sort_pagereflist = true; fbdefio->deferred_io = fbtft_deferred_io; - fb_deferred_io_init(info); snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name); info->fix.type = FB_TYPE_PACKED_PIXELS; @@ -666,6 +664,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, info->fix.line_length = width * bpp / 8; info->fix.accel = FB_ACCEL_NONE; info->fix.smem_len = vmem_size; + fb_deferred_io_init(info); info->var.rotate = pdata->rotate; info->var.xres = width; diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c index 04df6f9f5403..cc6d80554c98 100644 --- a/drivers/staging/gdm724x/gdm_tty.c +++ b/drivers/staging/gdm724x/gdm_tty.c @@ -17,12 +17,6 @@ #define GDM_TTY_MAJOR 0 #define GDM_TTY_MINOR 32 -#define ACM_CTRL_DTR 0x01 -#define ACM_CTRL_RTS 0x02 -#define ACM_CTRL_DSR 0x02 -#define ACM_CTRL_RI 0x08 -#define ACM_CTRL_DCD 0x01 - #define WRITE_SIZE 2048 #define MUX_TX_MAX_SIZE 2048 diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index 843760675876..05e91e6bc2a0 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -115,7 +115,7 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, int num) { int i; - struct snd_soc_dapm_widget *w, *next_w; + struct snd_soc_dapm_widget 
*w, *tmp_w; #ifdef CONFIG_DEBUG_FS struct dentry *parent = dapm->debugfs_dapm; struct dentry *debugfs_w = NULL; @@ -124,13 +124,13 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, mutex_lock(&dapm->card->dapm_mutex); for (i = 0; i < num; i++) { /* below logic can be optimized to identify widget pointer */ - list_for_each_entry_safe(w, next_w, &dapm->card->widgets, - list) { - if (w->dapm != dapm) - continue; - if (!strcmp(w->name, widget->name)) + w = NULL; + list_for_each_entry(tmp_w, &dapm->card->widgets, list) { + if (tmp_w->dapm == dapm && + !strcmp(tmp_w->name, widget->name)) { + w = tmp_w; break; - w = NULL; + } } if (!w) { dev_err(dapm->dev, "%s: widget not found\n", diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c index 687c6405c65b..3342b84597da 100644 --- a/drivers/staging/greybus/fw-management.c +++ b/drivers/staging/greybus/fw-management.c @@ -102,7 +102,7 @@ unlock: } static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt, - struct fw_mgmt_ioc_get_intf_version *fw_info) + struct fw_mgmt_ioc_get_intf_version *fw_info) { struct gb_connection *connection = fw_mgmt->connection; struct gb_fw_mgmt_interface_fw_version_response response; @@ -240,7 +240,7 @@ static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op) } static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt, - struct fw_mgmt_ioc_get_backend_version *fw_info) + struct fw_mgmt_ioc_get_backend_version *fw_info) { struct gb_connection *connection = fw_mgmt->connection; struct gb_fw_mgmt_backend_fw_version_request request; @@ -473,7 +473,7 @@ static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd, return -EFAULT; ret = fw_mgmt_backend_fw_update_operation(fw_mgmt, - backend_update.firmware_tag); + backend_update.firmware_tag); if (ret) return ret; diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c index 2471448ba42a..1a61fce98056 100644 --- a/drivers/staging/greybus/loopback.c +++ b/drivers/staging/greybus/loopback.c @@ -870,7 +870,7 @@ static int gb_loopback_fn(void *data) if (gb->send_count == gb->iteration_max) { mutex_unlock(&gb->mutex); - /* Wait for synchronous and asynchronus completion */ + /* Wait for synchronous and asynchronous completion */ gb_loopback_async_wait_all(gb); /* Mark complete unless user-space has poked us */ diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 1fd6a0c6e1d8..421ce9dbf44c 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -22,10 +22,14 @@ if STAGING_MEDIA && MEDIA_SUPPORT # Please keep them in alphabetic order source "drivers/staging/media/atomisp/Kconfig" +source "drivers/staging/media/av7110/Kconfig" + source "drivers/staging/media/hantro/Kconfig" source "drivers/staging/media/imx/Kconfig" +source "drivers/staging/media/ipu3/Kconfig" + source "drivers/staging/media/max96712/Kconfig" source "drivers/staging/media/meson/vdec/Kconfig" @@ -34,14 +38,12 @@ source "drivers/staging/media/omap4iss/Kconfig" source "drivers/staging/media/rkvdec/Kconfig" -source "drivers/staging/media/sunxi/Kconfig" +source "drivers/staging/media/stkwebcam/Kconfig" -source "drivers/staging/media/zoran/Kconfig" +source "drivers/staging/media/sunxi/Kconfig" source "drivers/staging/media/tegra-video/Kconfig" -source "drivers/staging/media/ipu3/Kconfig" - -source "drivers/staging/media/av7110/Kconfig" +source "drivers/staging/media/zoran/Kconfig" endif diff --git a/drivers/staging/media/Makefile 
b/drivers/staging/media/Makefile index 66d6f6d51c86..950e96f10aad 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_VIDEO_MAX96712) += max96712/ obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/ +obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam/ obj-$(CONFIG_VIDEO_SUNXI) += sunxi/ obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/ obj-$(CONFIG_VIDEO_HANTRO) += hantro/ diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile index 2485d7b3fee2..fb7b406f50bf 100644 --- a/drivers/staging/media/atomisp/Makefile +++ b/drivers/staging/media/atomisp/Makefile @@ -13,7 +13,6 @@ atomisp = $(srctree)/drivers/staging/media/atomisp/ # SPDX-License-Identifier: GPL-2.0 atomisp-objs += \ - pci/atomisp_acc.o \ pci/atomisp_cmd.o \ pci/atomisp_compat_css20.o \ pci/atomisp_csi2.o \ @@ -45,9 +44,7 @@ atomisp-objs += \ pci/camera/pipe/src/pipe_util.o \ pci/camera/util/src/util.o \ pci/hmm/hmm_bo.o \ - pci/hmm/hmm_dynamic_pool.o \ pci/hmm/hmm.o \ - pci/hmm/hmm_reserved_pool.o \ pci/ia_css_device_access.o \ pci/ia_css_isp_configs.o \ pci/ia_css_isp_states.o \ diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c index 00d6842c07d6..3c81ab73cdae 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c @@ -616,13 +616,15 @@ static int mt9m114_get_intg_factor(struct i2c_client *client, struct camera_mipi_info *info, const struct mt9m114_res_struct *res) { - struct atomisp_sensor_mode_data *buf = &info->data; + struct atomisp_sensor_mode_data *buf; u32 reg_val; int ret; if (!info) return -EINVAL; + buf = &info->data; + ret = mt9m114_read_reg(client, MISENSOR_32BIT, REG_PIXEL_CLK, ®_val); if (ret) diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c index da98094d7094..d5d099ac1b70 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c @@ -906,22 +906,17 @@ static int ov2722_get_fmt(struct v4l2_subdev *sd, static int ov2722_detect(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; - u16 high, low; - int ret; + u16 high = 0, low = 0; u16 id; u8 revision; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) return -ENODEV; - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_CHIP_ID_H, &high); - if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); - return -ENODEV; - } - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_CHIP_ID_L, &low); + ov2722_read_reg(client, OV2722_8BIT, + OV2722_SC_CMMN_CHIP_ID_H, &high); + ov2722_read_reg(client, OV2722_8BIT, + OV2722_SC_CMMN_CHIP_ID_L, &low); id = (high << 8) | low; if ((id != OV2722_ID) && (id != OV2720_ID)) { @@ -929,8 +924,9 @@ static int ov2722_detect(struct i2c_client *client) return -ENODEV; } - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_SUB_ID, &high); + high = 0; + ov2722_read_reg(client, OV2722_8BIT, + OV2722_SC_CMMN_SUB_ID, &high); revision = (u8)high & 0x0f; dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision); diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h index 79df07bd69b6..a1366666f49c 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h +++ 
b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h @@ -855,7 +855,7 @@ static struct ov5693_reg const ov5693_1616x1216_30fps[] = { {OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/ - {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binnning control*/ + {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binning control*/ {OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/ {OV5693_8BIT, 0x5002, 0x00}, {OV5693_8BIT, 0x5041, 0x84}, diff --git a/drivers/staging/media/atomisp/include/hmm/hmm.h b/drivers/staging/media/atomisp/include/hmm/hmm.h index b48bdf5c274c..c0384bb0a762 100644 --- a/drivers/staging/media/atomisp/include/hmm/hmm.h +++ b/drivers/staging/media/atomisp/include/hmm/hmm.h @@ -26,21 +26,18 @@ #include <linux/slab.h> #include <linux/mm.h> -#include "hmm/hmm_pool.h" +#include "hmm_common.h" +#include "hmm/hmm_bo.h" #include "ia_css_types.h" #define mmgr_NULL ((ia_css_ptr)0) #define mmgr_EXCEPTION ((ia_css_ptr) - 1) -int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type); -void hmm_pool_unregister(enum hmm_pool_type pool_type); - int hmm_init(void); void hmm_cleanup(void); -ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, - int from_highmem, const void __user *userptr, - const uint16_t attrs); +ia_css_ptr hmm_alloc(size_t bytes); +ia_css_ptr hmm_create_from_userdata(size_t bytes, const void __user *userptr); void hmm_free(ia_css_ptr ptr); int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes); int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes); @@ -69,17 +66,6 @@ void hmm_vunmap(ia_css_ptr virt); void hmm_flush_vmap(ia_css_ptr virt); /* - * Address translation from ISP shared memory address to kernel virtual address - * if the memory is not vmmaped, then do it. - */ -void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached); - -/* - * Address translation from kernel virtual address to ISP shared memory address - */ -ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr); - -/* * map ISP memory starts with virt to specific vma. * * used for mmap operation. 
@@ -89,16 +75,6 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr); */ int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt); -/* show memory statistic - */ -void hmm_show_mem_stat(const char *func, const int line); - -/* init memory statistic - */ -void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr); - -extern bool dypool_enable; -extern unsigned int dypool_pgnr; extern struct hmm_bo_device bo_device; #endif diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h index 8c78a5d87b65..385e22fc4a46 100644 --- a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h +++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h @@ -76,17 +76,10 @@ enum hmm_bo_type { HMM_BO_PRIVATE, - HMM_BO_SHARE, HMM_BO_USER, HMM_BO_LAST, }; -enum hmm_page_type { - HMM_PAGE_TYPE_RESERVED, - HMM_PAGE_TYPE_DYNAMIC, - HMM_PAGE_TYPE_GENERAL, -}; - #define HMM_BO_MASK 0x1 #define HMM_BO_FREE 0x0 #define HMM_BO_ALLOCED 0x1 @@ -121,11 +114,6 @@ struct hmm_bo_device { struct kmem_cache *bo_cache; }; -struct hmm_page_object { - struct page *page; - enum hmm_page_type type; -}; - struct hmm_buffer_object { struct hmm_bo_device *bdev; struct list_head list; @@ -136,8 +124,6 @@ struct hmm_buffer_object { /* mutex protecting this BO */ struct mutex mutex; enum hmm_bo_type type; - struct hmm_page_object *page_obj; /* physical pages */ - int from_highmem; int mmap_count; int status; int mem_type; @@ -218,34 +204,20 @@ void hmm_bo_ref(struct hmm_buffer_object *bo); */ void hmm_bo_unref(struct hmm_buffer_object *bo); -/* - * allocate/free physical pages for the bo. will try to alloc mem - * from highmem if from_highmem is set, and type indicate that the - * pages will be allocated by using video driver (for share buffer) - * or by ISP driver itself. - */ - int hmm_bo_allocated(struct hmm_buffer_object *bo); /* - * allocate/free physical pages for the bo. will try to alloc mem - * from highmem if from_highmem is set, and type indicate that the + * Allocate/Free physical pages for the bo. Type indicates if the * pages will be allocated by using video driver (for share buffer) * or by ISP driver itself. */ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, - enum hmm_bo_type type, int from_highmem, - const void __user *userptr, bool cached); + enum hmm_bo_type type, + const void __user *userptr); void hmm_bo_free_pages(struct hmm_buffer_object *bo); int hmm_bo_page_allocated(struct hmm_buffer_object *bo); /* - * get physical page info of the bo. - */ -int hmm_bo_get_page_info(struct hmm_buffer_object *bo, - struct hmm_page_object **page_obj, int *pgnr); - -/* * bind/unbind the physical pages to a virtual address space. */ int hmm_bo_bind(struct hmm_buffer_object *bo); @@ -280,9 +252,6 @@ void hmm_bo_vunmap(struct hmm_buffer_object *bo); int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo); -extern struct hmm_pool dynamic_pool; -extern struct hmm_pool reserved_pool; - /* * find the buffer object by its virtual address vaddr. * return NULL if no such buffer object found. diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/include/hmm/hmm_common.h index 7152e9b52ba4..d8610b135de0 100644 --- a/drivers/staging/media/atomisp/include/hmm/hmm_common.h +++ b/drivers/staging/media/atomisp/include/hmm/hmm_common.h @@ -68,30 +68,4 @@ #define check_null_return_void(ptr, fmt, arg ...) \ var_equal_return_void(ptr, NULL, fmt, ## arg) -/* hmm_mem_stat is used to trace the hmm mem used by ISP pipe. 
The unit is page - * number. - * - * res_size: reserved mem pool size, being allocated from system at system boot time. - * res_size >= res_cnt. - * sys_size: system mem pool size, being allocated from system at camera running time. - * dyc_size: dynamic mem pool size. - * dyc_thr: dynamic mem pool high watermark. - * dyc_size <= dyc_thr. - * usr_size: user ptr mem size. - * - * res_cnt: track the mem allocated from reserved pool at camera running time. - * tol_cnt: track the total mem used by ISP pipe at camera running time. - */ -struct _hmm_mem_stat { - int res_size; - int sys_size; - int dyc_size; - int dyc_thr; - int usr_size; - int res_cnt; - int tol_cnt; -}; - -extern struct _hmm_mem_stat hmm_mem_stat; - #endif diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/include/hmm/hmm_pool.h deleted file mode 100644 index 3fef57de973c..000000000000 --- a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h +++ /dev/null @@ -1,116 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef __HMM_POOL_H__ -#define __HMM_POOL_H__ - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> -#include <linux/kref.h> -#include "hmm_common.h" -#include "hmm/hmm_bo.h" - -#define ALLOC_PAGE_FAIL_NUM 5 - -enum hmm_pool_type { - HMM_POOL_TYPE_RESERVED, - HMM_POOL_TYPE_DYNAMIC, -}; - -/** - * struct hmm_pool_ops - memory pool callbacks. - * - * @pool_init: initialize the memory pool. - * @pool_exit: uninitialize the memory pool. - * @pool_alloc_pages: allocate pages from memory pool. - * @pool_free_pages: free pages to memory pool. - * @pool_inited: check whether memory pool is initialized. - */ -struct hmm_pool_ops { - int (*pool_init)(void **pool, unsigned int pool_size); - void (*pool_exit)(void **pool); - unsigned int (*pool_alloc_pages)(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached); - void (*pool_free_pages)(void *pool, - struct hmm_page_object *page_obj); - int (*pool_inited)(void *pool); -}; - -struct hmm_pool { - struct hmm_pool_ops *pops; - - void *pool_info; -}; - -/** - * struct hmm_reserved_pool_info - represents reserved pool private data. - * @pages: a array that store physical pages. - * The array is as reserved memory pool. - * @index: to indicate the first blank page number - * in reserved memory pool(pages array). - * @pgnr: the valid page amount in reserved memory - * pool. - * @list_lock: list lock is used to protect the operation - * to reserved memory pool. - * @flag: reserved memory pool state flag. - */ -struct hmm_reserved_pool_info { - struct page **pages; - - unsigned int index; - unsigned int pgnr; - spinlock_t list_lock; - bool initialized; -}; - -/** - * struct hmm_dynamic_pool_info - represents dynamic pool private data. - * @pages_list: a list that store physical pages. 
- * The pages list is as dynamic memory pool. - * @list_lock: list lock is used to protect the operation - * to dynamic memory pool. - * @flag: dynamic memory pool state flag. - * @pgptr_cache: struct kmem_cache, manages a cache. - */ -struct hmm_dynamic_pool_info { - struct list_head pages_list; - - /* list lock is used to protect the free pages block lists */ - spinlock_t list_lock; - - struct kmem_cache *pgptr_cache; - bool initialized; - - unsigned int pool_size; - unsigned int pgnr; -}; - -struct hmm_page { - struct page *page; - struct list_head list; -}; - -extern struct hmm_pool_ops reserved_pops; -extern struct hmm_pool_ops dynamic_pops; - -#endif diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h index 22c4103b0385..f96f5adbd9de 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp.h @@ -740,24 +740,6 @@ enum atomisp_frame_status { ATOMISP_FRAME_STATUS_FLASH_FAILED, }; -enum atomisp_acc_type { - ATOMISP_ACC_STANDALONE, /* Stand-alone acceleration */ - ATOMISP_ACC_OUTPUT, /* Accelerator stage on output frame */ - ATOMISP_ACC_VIEWFINDER /* Accelerator stage on viewfinder frame */ -}; - -enum atomisp_acc_arg_type { - ATOMISP_ACC_ARG_SCALAR_IN, /* Scalar input argument */ - ATOMISP_ACC_ARG_SCALAR_OUT, /* Scalar output argument */ - ATOMISP_ACC_ARG_SCALAR_IO, /* Scalar in/output argument */ - ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */ - ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */ - ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */ - ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */ - ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */ - ATOMISP_ACC_ARG_FRAME /* Frame argument */ -}; - /* ISP memories, isp2400 */ enum atomisp_acc_memory { ATOMISP_ACC_MEMORY_PMEM0 = 0, @@ -836,56 +818,6 @@ enum atomisp_burst_capture_options { #define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10 #define EXT_ISP_SHOT_MODE_SPORTS 11 -struct atomisp_sp_arg { - enum atomisp_acc_arg_type type; /* Type of SP argument */ - void *value; /* Value of SP argument */ - unsigned int size; /* Size of SP argument */ -}; - -/* Acceleration API */ - -/* For CSS 1.0 only */ -struct atomisp_acc_fw_arg { - unsigned int fw_handle; - unsigned int index; - void __user *value; - size_t size; -}; - -/* - * Set arguments after first mapping with ATOMISP_IOC_ACC_S_MAPPED_ARG. - */ -struct atomisp_acc_s_mapped_arg { - unsigned int fw_handle; - __u32 memory; /* one of enum atomisp_acc_memory */ - size_t length; - unsigned long css_ptr; -}; - -struct atomisp_acc_fw_abort { - unsigned int fw_handle; - /* Timeout in us */ - unsigned int timeout; -}; - -struct atomisp_acc_fw_load { - unsigned int size; - unsigned int fw_handle; - void __user *data; -}; - -/* - * Load firmware to specified pipeline. - */ -struct atomisp_acc_fw_load_to_pipe { - __u32 flags; /* Flags, see below for valid values */ - unsigned int fw_handle; /* Handle, filled by kernel. 
*/ - __u32 size; /* Firmware binary size */ - void __user *data; /* Pointer to firmware */ - __u32 type; /* Binary type */ - __u32 reserved[3]; /* Set to zero */ -}; - /* * Set Senor run mode */ @@ -893,37 +825,6 @@ struct atomisp_s_runmode { __u32 mode; }; -#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW BIT(0) -#define ATOMISP_ACC_FW_LOAD_FL_COPY BIT(1) -#define ATOMISP_ACC_FW_LOAD_FL_VIDEO BIT(2) -#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE BIT(3) -#define ATOMISP_ACC_FW_LOAD_FL_ACC BIT(4) -#define ATOMISP_ACC_FW_LOAD_FL_ENABLE BIT(16) - -#define ATOMISP_ACC_FW_LOAD_TYPE_NONE 0 /* Normal binary: don't use */ -#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT 1 /* Stage on output */ -#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER 2 /* Stage on viewfinder */ -#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE 3 /* Stand-alone acceleration */ - -struct atomisp_acc_map { - __u32 flags; /* Flags, see list below */ - __u32 length; /* Length of data in bytes */ - void __user *user_ptr; /* Pointer into user space */ - unsigned long css_ptr; /* Pointer into CSS address space */ - __u32 reserved[4]; /* Set to zero */ -}; - -#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */ -#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */ -#define ATOMISP_MAP_FLAG_CONTIGUOUS 0x0004 -#define ATOMISP_MAP_FLAG_CLEARED 0x0008 - -struct atomisp_acc_state { - __u32 flags; /* Flags, see list below */ -#define ATOMISP_STATE_FLAG_ENABLE ATOMISP_ACC_FW_LOAD_FL_ENABLE - unsigned int fw_handle; -}; - struct atomisp_update_exposure { unsigned int gain; unsigned int digi_gain; @@ -1091,29 +992,6 @@ struct atomisp_sensor_ae_bracketing_lut { #define ATOMISP_IOC_S_3A_CONFIG \ _IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config) -/* Accelerate ioctls */ -#define ATOMISP_IOC_ACC_LOAD \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load) - -#define ATOMISP_IOC_ACC_UNLOAD \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int) - -/* For CSS 1.0 only */ -#define ATOMISP_IOC_ACC_S_ARG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg) - -#define ATOMISP_IOC_ACC_START \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int) - -#define ATOMISP_IOC_ACC_WAIT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int) - -#define ATOMISP_IOC_ACC_ABORT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort) - -#define ATOMISP_IOC_ACC_DESTAB \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg) - /* sensor OTP memory read */ #define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data) @@ -1133,24 +1011,6 @@ struct atomisp_sensor_ae_bracketing_lut { #define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data) -/* - * Ioctls to map and unmap user buffers to CSS address space for acceleration. - * User fills fields length and user_ptr and sets other fields to zero, - * kernel may modify the flags and sets css_ptr. - */ -#define ATOMISP_IOC_ACC_MAP \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map) - -/* User fills fields length, user_ptr, and css_ptr and zeroes other fields. 
*/ -#define ATOMISP_IOC_ACC_UNMAP \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map) - -#define ATOMISP_IOC_ACC_S_MAPPED_ARG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg) - -#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe) - #define ATOMISP_IOC_S_PARAMETERS \ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters) @@ -1184,12 +1044,6 @@ struct atomisp_sensor_ae_bracketing_lut { #define ATOMISP_IOC_S_EXPOSURE_WINDOW \ _IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window) -#define ATOMISP_IOC_S_ACC_STATE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state) - -#define ATOMISP_IOC_G_ACC_STATE \ - _IOR('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state) - #define ATOMISP_IOC_INJECT_A_FAKE_EVENT \ _IOW('v', BASE_VIDIOC_PRIVATE + 42, int) diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt new file mode 100644 index 000000000000..d128b792e05f --- /dev/null +++ b/drivers/staging/media/atomisp/notes.txt @@ -0,0 +1,30 @@ +Some notes about the working of the atomisp drivers (learned while working +on cleaning it up). + +The atomisp seems to be a generic DSP(ISP) like processor without a fixed +pipeline. It does not have its own memory, but instead uses main memory. +The ISP has its own address-space and main memory needs to be mapped into +its address space through the ISP's MMU. + +Memory is allocated by the hmm code. hmm_alloc() returns an ISP virtual +address. The hmm code keeps a list of all allocations and when necessary +the hmm code finds the backing hmm-buffer-object (hmm_bo) by looking +up the hmm_bo based on the ISP virtual address. + +The actual processing pipeline is made by loading one or more programs, +called binaries. The shisp_240??0_v21.bin firmware file contains many +different binaries. Binaries are picked by filling a ia_css_binary_descr +struct with various input and output parameters and then calling +ia_css_binary_find(). Some binaries support creating multiple outputs +(preview + video frame?) at the same time. + +For example for the /dev/video0 preview node load_preview_binaries() +from atomisp/pci/sh_css.c is called and then loads a preview and +optionally a scalar binary. Note when digital zoom is disabled +(it is enabled by default) only the preview binary is loaded. +So in this case a single binary handles the entire pipeline. + +Since getting a picture requires multiple processing steps, +this means that unlike in fixed pipelines the soft pipelines +on the ISP can do multiple processing steps in a single pipeline +element (in a single binary). diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c deleted file mode 100644 index 28cb271663c4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.c +++ /dev/null @@ -1,625 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ - -/* - * This file implements loadable acceleration firmware API, - * including ioctls to map and unmap acceleration parameters and buffers. - */ - -#include <linux/init.h> -#include <media/v4l2-event.h> - -#include "hmm.h" - -#include "atomisp_acc.h" -#include "atomisp_internal.h" -#include "atomisp_compat.h" -#include "atomisp_cmd.h" - -#include "ia_css.h" - -static const struct { - unsigned int flag; - enum ia_css_pipe_id pipe_id; -} acc_flag_to_pipe[] = { - { ATOMISP_ACC_FW_LOAD_FL_PREVIEW, IA_CSS_PIPE_ID_PREVIEW }, - { ATOMISP_ACC_FW_LOAD_FL_COPY, IA_CSS_PIPE_ID_COPY }, - { ATOMISP_ACC_FW_LOAD_FL_VIDEO, IA_CSS_PIPE_ID_VIDEO }, - { ATOMISP_ACC_FW_LOAD_FL_CAPTURE, IA_CSS_PIPE_ID_CAPTURE }, - { ATOMISP_ACC_FW_LOAD_FL_ACC, IA_CSS_PIPE_ID_ACC } -}; - -/* - * Allocate struct atomisp_acc_fw along with space for firmware. - * The returned struct atomisp_acc_fw is cleared (firmware region is not). - */ -static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size) -{ - struct atomisp_acc_fw *acc_fw; - - acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL); - if (!acc_fw) - return NULL; - - acc_fw->fw = vmalloc(fw_size); - if (!acc_fw->fw) { - kfree(acc_fw); - return NULL; - } - - return acc_fw; -} - -static void acc_free_fw(struct atomisp_acc_fw *acc_fw) -{ - vfree(acc_fw->fw); - kfree(acc_fw); -} - -static struct atomisp_acc_fw * -acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle) -{ - struct atomisp_acc_fw *acc_fw; - - list_for_each_entry(acc_fw, &asd->acc.fw, list) - if (acc_fw->handle == handle) - return acc_fw; - - return NULL; -} - -static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd, - unsigned long css_ptr, size_t length) -{ - struct atomisp_map *atomisp_map; - - list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) { - if (atomisp_map->ptr == css_ptr && - atomisp_map->length == length) - return atomisp_map; - } - return NULL; -} - -static int acc_stop_acceleration(struct atomisp_sub_device *asd) -{ - int ret; - - ret = atomisp_css_stop_acc_pipe(asd); - atomisp_css_destroy_acc_pipe(asd); - - return ret; -} - -void atomisp_acc_cleanup(struct atomisp_device *isp) -{ - int i; - - for (i = 0; i < isp->num_of_streams; i++) - ida_destroy(&isp->asd[i].acc.ida); -} - -void atomisp_acc_release(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw, *ta; - struct atomisp_map *atomisp_map, *tm; - - /* Stop acceleration if already running */ - if (asd->acc.pipeline) - acc_stop_acceleration(asd); - - /* Unload all loaded acceleration binaries */ - list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) { - list_del(&acc_fw->list); - ida_free(&asd->acc.ida, acc_fw->handle); - acc_free_fw(acc_fw); - } - - /* Free all mapped memory blocks */ - list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) { - list_del(&atomisp_map->list); - hmm_free(atomisp_map->ptr); - kfree(atomisp_map); - } -} - -int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load_to_pipe *user_fw) -{ - static const unsigned int pipeline_flags = - ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY | - ATOMISP_ACC_FW_LOAD_FL_VIDEO | - ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC; - - struct atomisp_acc_fw *acc_fw; - int handle; - - if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw)) - return -EINVAL; - - /* Binary has to be enabled at least for one pipeline */ - if (!(user_fw->flags & pipeline_flags)) - return -EINVAL; - - /* We do not 
support other flags yet */ - if (user_fw->flags & ~pipeline_flags) - return -EINVAL; - - if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT || - user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE) - return -EINVAL; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_alloc_fw(user_fw->size); - if (!acc_fw) - return -ENOMEM; - - if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) { - acc_free_fw(acc_fw); - return -EFAULT; - } - - handle = ida_alloc(&asd->acc.ida, GFP_KERNEL); - if (handle < 0) { - acc_free_fw(acc_fw); - return -ENOSPC; - } - - user_fw->fw_handle = handle; - acc_fw->handle = handle; - acc_fw->flags = user_fw->flags; - acc_fw->type = user_fw->type; - acc_fw->fw->handle = handle; - - /* - * correct isp firmware type in order ISP firmware can be appended - * to correct pipe properly - */ - if (acc_fw->fw->type == ia_css_isp_firmware) { - static const int type_to_css[] = { - [ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] = - IA_CSS_ACC_OUTPUT, - [ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] = - IA_CSS_ACC_VIEWFINDER, - [ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] = - IA_CSS_ACC_STANDALONE, - }; - acc_fw->fw->info.isp.type = type_to_css[acc_fw->type]; - } - - list_add_tail(&acc_fw->list, &asd->acc.fw); - return 0; -} - -int atomisp_acc_load(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load *user_fw) -{ - struct atomisp_acc_fw_load_to_pipe ltp = {0}; - int r; - - ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC; - ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE; - ltp.size = user_fw->size; - ltp.data = user_fw->data; - r = atomisp_acc_load_to_pipe(asd, <p); - user_fw->fw_handle = ltp.fw_handle; - return r; -} - -int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_acc_fw *acc_fw; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_get_fw(asd, *handle); - if (!acc_fw) - return -EINVAL; - - list_del(&acc_fw->list); - ida_free(&asd->acc.ida, acc_fw->handle); - acc_free_fw(acc_fw); - - return 0; -} - -int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_acc_fw *acc_fw; - int ret; - unsigned int nbin; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - /* Invalidate caches. 
FIXME: should flush only necessary buffers */ - wbinvd(); - - ret = atomisp_css_create_acc_pipe(asd); - if (ret) - return ret; - - nbin = 0; - list_for_each_entry(acc_fw, &asd->acc.fw, list) { - if (*handle != 0 && *handle != acc_fw->handle) - continue; - - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE) - continue; - - /* Add the binary into the pipeline */ - ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin); - if (ret < 0) { - dev_err(isp->dev, "acc_load_binary failed\n"); - goto err_stage; - } - - ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) { - dev_err(isp->dev, "acc_set_parameters failed\n"); - goto err_stage; - } - nbin++; - } - if (nbin < 1) { - /* Refuse creating pipelines with no binaries */ - dev_err(isp->dev, "%s: no acc binary available\n", __func__); - ret = -EINVAL; - goto err_stage; - } - - ret = atomisp_css_start_acc_pipe(asd); - if (ret) { - dev_err(isp->dev, "%s: atomisp_acc_start_acc_pipe failed\n", - __func__); - goto err_stage; - } - - return 0; - -err_stage: - atomisp_css_destroy_acc_pipe(asd); - return ret; -} - -int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_device *isp = asd->isp; - int ret; - - if (!asd->acc.pipeline) - return -ENOENT; - - if (*handle && !acc_get_fw(asd, *handle)) - return -EINVAL; - - ret = atomisp_css_wait_acc_finish(asd); - if (acc_stop_acceleration(asd) == -EIO) { - atomisp_reset(isp); - return -EINVAL; - } - - return ret; -} - -void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle) -{ - struct v4l2_event event = { 0 }; - - event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE; - event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence); - event.id = handle; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map) -{ - struct atomisp_map *atomisp_map; - ia_css_ptr cssptr; - int pgnr; - - if (map->css_ptr) - return -EINVAL; - - if (asd->acc.pipeline) - return -EBUSY; - - if (map->user_ptr) { - /* Buffer to map must be page-aligned */ - if ((unsigned long)map->user_ptr & ~PAGE_MASK) { - dev_err(asd->isp->dev, - "%s: mapped buffer address %p is not page aligned\n", - __func__, map->user_ptr); - return -EINVAL; - } - - pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE); - if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { - dev_err(asd->isp->dev, - "user space memory size is less than the expected size..\n"); - return -ENOMEM; - } else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { - dev_err(asd->isp->dev, - "user space memory size is large than the expected size..\n"); - return -ENOMEM; - } - - cssptr = hmm_alloc(map->length, HMM_BO_USER, 0, map->user_ptr, - map->flags & ATOMISP_MAP_FLAG_CACHED); - - } else { - /* Allocate private buffer. 
*/ - cssptr = hmm_alloc(map->length, HMM_BO_PRIVATE, 0, NULL, - map->flags & ATOMISP_MAP_FLAG_CACHED); - } - - if (!cssptr) - return -ENOMEM; - - atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL); - if (!atomisp_map) { - hmm_free(cssptr); - return -ENOMEM; - } - atomisp_map->ptr = cssptr; - atomisp_map->length = map->length; - list_add(&atomisp_map->list, &asd->acc.memory_maps); - - dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n", - __func__, map->user_ptr, cssptr, map->length); - map->css_ptr = cssptr; - return 0; -} - -int atomisp_acc_unmap(struct atomisp_sub_device *asd, - struct atomisp_acc_map *map) -{ - struct atomisp_map *atomisp_map; - - if (asd->acc.pipeline) - return -EBUSY; - - atomisp_map = acc_get_map(asd, map->css_ptr, map->length); - if (!atomisp_map) - return -EINVAL; - - list_del(&atomisp_map->list); - hmm_free(atomisp_map->ptr); - kfree(atomisp_map); - return 0; -} - -int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, - struct atomisp_acc_s_mapped_arg *arg) -{ - struct atomisp_acc_fw *acc_fw; - - if (arg->memory >= ATOMISP_ACC_NR_MEMORY) - return -EINVAL; - - if (asd->acc.pipeline) - return -EBUSY; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - if (arg->css_ptr != 0 || arg->length != 0) { - /* Unless the parameter is cleared, check that it exists */ - if (!acc_get_map(asd, arg->css_ptr, arg->length)) - return -EINVAL; - } - - acc_fw->args[arg->memory].length = arg->length; - acc_fw->args[arg->memory].css_ptr = arg->css_ptr; - - dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n", - __func__, arg->memory, (void *)arg->css_ptr, - (unsigned long)arg->length); - return 0; -} - -static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd, - int i, - struct atomisp_acc_fw *acc_fw) -{ - while (--i >= 0) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } -} - -/* - * Appends the loaded acceleration binary extensions to the - * current ISP mode. Must be called just before sh_css_start(). - */ -int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw; - bool ext_loaded = false; - bool continuous = asd->continuous_mode->val && - asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW; - int ret = 0, i = -1; - struct atomisp_device *isp = asd->isp; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - /* Invalidate caches. FIXME: should flush only necessary buffers */ - wbinvd(); - - list_for_each_entry(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) { - /* - * QoS (ACC pipe) acceleration stages are - * currently allowed only in continuous mode. - * Skip them for all other modes. 
- */ - if (!continuous && - acc_flag_to_pipe[i].flag == - ATOMISP_ACC_FW_LOAD_FL_ACC) - continue; - - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - ret = atomisp_css_load_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id, - acc_fw->type); - if (ret) { - atomisp_acc_unload_some_extensions(asd, i, acc_fw); - goto error; - } - - ext_loaded = true; - } - } - - ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) { - atomisp_acc_unload_some_extensions(asd, i, acc_fw); - goto error; - } - } - - if (!ext_loaded) - return ret; - - ret = atomisp_css_update_stream(asd); - if (ret) { - dev_err(isp->dev, "%s: update stream failed.\n", __func__); - atomisp_acc_unload_extensions(asd); - goto error; - } - - asd->acc.extension_mode = true; - return 0; - -error: - list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) { - if (!continuous && - acc_flag_to_pipe[i].flag == - ATOMISP_ACC_FW_LOAD_FL_ACC) - continue; - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - } - return ret; -} - -void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw; - int i; - - if (!asd->acc.extension_mode) - return; - - list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - } - - asd->acc.extension_mode = false; -} - -int atomisp_acc_set_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg) -{ - struct atomisp_acc_fw *acc_fw; - bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0; - struct ia_css_pipe *pipe; - int r; - int i; - - if (!asd->acc.extension_mode) - return -EBUSY; - - if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE) - return -EINVAL; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - if (enable) - wbinvd(); - - for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - pipes[acc_flag_to_pipe[i].pipe_id]; - r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle, - enable); - if (r) - return -EBADRQC; - } - } - - if (enable) - acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE; - else - acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE; - - return 0; -} - -int atomisp_acc_get_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg) -{ - struct atomisp_acc_fw *acc_fw; - - if (!asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - arg->flags = acc_fw->flags; - - return 0; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp_acc.h deleted file mode 100644 index 48d94232229b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.h +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_ACC_H__ -#define __ATOMISP_ACC_H__ - -#include "../../include/linux/atomisp.h" -#include "atomisp_internal.h" - -#include "ia_css_types.h" - -/* - * Interface functions for AtomISP driver acceleration API implementation. - */ - -struct atomisp_sub_device; - -void atomisp_acc_cleanup(struct atomisp_device *isp); - -/* - * Free up any allocated resources. - * Must be called each time when the device is closed. - * Note that there isn't corresponding open() call; - * this function may be called sequentially multiple times. - * Must be called to free up resources before driver is unloaded. - */ -void atomisp_acc_release(struct atomisp_sub_device *asd); - -/* Load acceleration binary. DEPRECATED. */ -int atomisp_acc_load(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load *fw); - -/* Load acceleration binary with specified properties */ -int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load_to_pipe *fw); - -/* Unload specified acceleration binary */ -int atomisp_acc_unload(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Map a memory region into ISP memory space. - */ -int atomisp_acc_map(struct atomisp_sub_device *asd, - struct atomisp_acc_map *map); - -/* - * Unmap a mapped memory region. - */ -int atomisp_acc_unmap(struct atomisp_sub_device *asd, - struct atomisp_acc_map *map); - -/* - * Set acceleration binary argument to a previously mapped memory region. - */ -int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, - struct atomisp_acc_s_mapped_arg *arg); - -/* - * Start acceleration. - * Return immediately, acceleration is left running in background. - * Specify either acceleration binary or pipeline which to start. - */ -int atomisp_acc_start(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Wait until acceleration finishes. - * This MUST be called after each acceleration has been started. - * Specify either acceleration binary or pipeline handle. - */ -int atomisp_acc_wait(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Used by ISR to notify ACC stage finished. - * This is internally used and does not export as IOCTL. - */ -void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle); - -/* - * Appends the loaded acceleration binary extensions to the - * current ISP mode. Must be called just before atomisp_css_start(). - */ -int atomisp_acc_load_extensions(struct atomisp_sub_device *asd); - -/* - * Must be called after streaming is stopped: - * unloads any loaded acceleration extensions. - */ -void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd); - -/* - * Set acceleration firmware flags. - */ -int atomisp_acc_set_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg); - -/* - * Get acceleration firmware flags. 
- */ -int atomisp_acc_get_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg); - -#endif /* __ATOMISP_ACC_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 97d5a528969b..c932f340068f 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -42,7 +42,6 @@ #include "atomisp_ioctl.h" #include "atomisp-regs.h" #include "atomisp_tables.h" -#include "atomisp_acc.h" #include "atomisp_compat.h" #include "atomisp_subdev.h" #include "atomisp_dfs_tables.h" @@ -539,7 +538,7 @@ irqreturn_t atomisp_isr(int irq, void *dev) clear_irq_reg(isp); - if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) + if (!atomisp_streaming_count(isp)) goto out_nowake; for (i = 0; i < isp->num_of_streams; i++) { @@ -901,9 +900,9 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, int err; unsigned long irqflags; struct ia_css_frame *frame = NULL; - struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp; - struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp; - struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp; + struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter; + struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter; + struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter; enum atomisp_metadata_type md_type; struct atomisp_device *isp = asd->isp; struct v4l2_control ctrl; @@ -942,60 +941,75 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, switch (buf_type) { case IA_CSS_BUFFER_TYPE_3A_STATISTICS: - list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp, + list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp, &asd->s3a_stats_in_css, list) { - if (s3a_buf->s3a_data == + if (s3a_iter->s3a_data == buffer.css_buffer.data.stats_3a) { - list_del_init(&s3a_buf->list); - list_add_tail(&s3a_buf->list, + list_del_init(&s3a_iter->list); + list_add_tail(&s3a_iter->list, &asd->s3a_stats_ready); + s3a_buf = s3a_iter; break; } } asd->s3a_bufs_in_css[css_pipe_id]--; atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id); - dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n", - __func__, s3a_buf->s3a_data->exp_id); + if (s3a_buf) + dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n", + __func__, s3a_buf->s3a_data->exp_id); + else + dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_METADATA: if (error) break; md_type = atomisp_get_metadata_type(asd, css_pipe_id); - list_for_each_entry_safe(md_buf, _md_buf_tmp, + list_for_each_entry_safe(md_iter, _md_buf_tmp, &asd->metadata_in_css[md_type], list) { - if (md_buf->metadata == + if (md_iter->metadata == buffer.css_buffer.data.metadata) { - list_del_init(&md_buf->list); - list_add_tail(&md_buf->list, + list_del_init(&md_iter->list); + list_add_tail(&md_iter->list, &asd->metadata_ready[md_type]); + md_buf = md_iter; break; } } asd->metadata_bufs_in_css[stream_id][css_pipe_id]--; atomisp_metadata_ready_event(asd, md_type); - dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n", - __func__, md_buf->metadata->exp_id); + if (md_buf) + dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n", + __func__, md_buf->metadata->exp_id); + else + dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_DIS_STATISTICS: - list_for_each_entry_safe(dis_buf, _dis_buf_tmp, + list_for_each_entry_safe(dis_iter, _dis_buf_tmp, &asd->dis_stats_in_css, 
list) { - if (dis_buf->dis_data == + if (dis_iter->dis_data == buffer.css_buffer.data.stats_dvs) { spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - list_del_init(&dis_buf->list); - list_add(&dis_buf->list, &asd->dis_stats); + list_del_init(&dis_iter->list); + list_add(&dis_iter->list, &asd->dis_stats); asd->params.dis_proj_data_valid = true; spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); + dis_buf = dis_iter; break; } } asd->dis_bufs_in_css--; - dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n", - __func__, dis_buf->dis_data->exp_id); + if (dis_buf) + dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n", + __func__, dis_buf->dis_data->exp_id); + else + dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: @@ -1302,34 +1316,11 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) for (i = 0; i < isp->num_of_streams; i++) { struct atomisp_sub_device *asd = &isp->asd[i]; - struct ia_css_pipeline *acc_pipeline; - struct ia_css_pipe *acc_pipe = NULL; if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED && !asd->stream_prepared) continue; - /* - * AtomISP::waitStageUpdate is blocked when WDT happens. - * By calling acc_done() for all loaded fw_handles, - * HAL will be unblocked. - */ - acc_pipe = asd->stream_env[i].pipes[IA_CSS_PIPE_ID_ACC]; - if (acc_pipe) { - acc_pipeline = ia_css_pipe_get_pipeline(acc_pipe); - if (acc_pipeline) { - struct ia_css_pipeline_stage *stage; - - for (stage = acc_pipeline->stages; stage; - stage = stage->next) { - const struct ia_css_fw_info *fw; - - fw = stage->firmware; - atomisp_acc_done(asd, fw->handle); - } - } - } - depth_cnt++; if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) @@ -1350,8 +1341,6 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) dev_warn(isp->dev, "can't stop streaming on sensor!\n"); - atomisp_acc_unload_extensions(asd); - atomisp_clear_css_buffer_counters(asd); css_pipe_id = atomisp_get_css_pipe_id(asd); @@ -1863,7 +1852,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) spin_lock_irqsave(&isp->lock, flags); - if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) { + if (!atomisp_streaming_count(isp)) { spin_unlock_irqrestore(&isp->lock, flags); return IRQ_HANDLED; } @@ -1914,9 +1903,6 @@ out: && isp->sw_contex.file_input) v4l2_subdev_call(isp->inputs[asd->input_curr].camera, video, s_stream, 1); - /* FIXME! 
FIX ACC implementation */ - if (asd->acc.pipeline && css_pipe_done[asd->index]) - atomisp_css_acc_done(asd); } dev_dbg(isp->dev, "<%s\n", __func__); @@ -6504,7 +6490,7 @@ int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) ret = atomisp_css_exp_id_unlock(asd, exp_id); if (ret) { dev_err(asd->isp->dev, - "%s exp_id is wrapping back to %d but force unlock failed,, err %d.\n", + "%s exp_id is wrapping back to %d but force unlock failed, err %d.\n", __func__, exp_id, ret); return ret; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h index 64c1bf0943e6..3393ae6824f0 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h @@ -240,7 +240,7 @@ int atomisp_css_input_configure_port(struct atomisp_sub_device *asd, unsigned int metadata_width, unsigned int metadata_height); -void atomisp_create_pipes_stream(struct atomisp_sub_device *asd); +int atomisp_create_pipes_stream(struct atomisp_sub_device *asd); void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd); void atomisp_css_stop(struct atomisp_sub_device *asd, @@ -442,33 +442,6 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, int atomisp_css_update_stream(struct atomisp_sub_device *asd); -int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd); - -void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - enum ia_css_pipe_id pipe_id, - unsigned int type); - -void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - enum ia_css_pipe_id pipe_id); - -int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd); - -void atomisp_css_acc_done(struct atomisp_sub_device *asd); - -int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - unsigned int index); - -void atomisp_css_unload_acc_binary(struct atomisp_sub_device *asd); - struct atomisp_acc_fw; int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw); diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index 781a11cca599..5aa108a1724c 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -31,7 +31,6 @@ #include "atomisp-regs.h" #include "atomisp_fops.h" #include "atomisp_ioctl.h" -#include "atomisp_acc.h" #include "ia_css_debug.h" #include "ia_css_isp_param.h" @@ -419,24 +418,14 @@ static void __dump_stream_config(struct atomisp_sub_device *asd, } static int __destroy_stream(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, bool force) + struct atomisp_stream_env *stream_env) { struct atomisp_device *isp = asd->isp; - int i; unsigned long timeout; if (!stream_env->stream) return 0; - if (!force) { - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - if (stream_env->update_pipe[i]) - break; - - if (i == IA_CSS_PIPE_ID_NUM) - return 0; - } - if (stream_env->stream_state == CSS_STREAM_STARTED && ia_css_stream_stop(stream_env->stream) != 0) { dev_err(isp->dev, "stop stream failed.\n"); @@ -470,12 +459,12 @@ static int __destroy_stream(struct atomisp_sub_device *asd, return 0; } -static int 
__destroy_streams(struct atomisp_sub_device *asd, bool force) +static int __destroy_streams(struct atomisp_sub_device *asd) { int ret, i; for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - ret = __destroy_stream(asd, &asd->stream_env[i], force); + ret = __destroy_stream(asd, &asd->stream_env[i]); if (ret) return ret; } @@ -530,21 +519,19 @@ static int __create_streams(struct atomisp_sub_device *asd) return 0; rollback: for (i--; i >= 0; i--) - __destroy_stream(asd, &asd->stream_env[i], true); + __destroy_stream(asd, &asd->stream_env[i]); return ret; } static int __destroy_stream_pipes(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, - bool force) + struct atomisp_stream_env *stream_env) { struct atomisp_device *isp = asd->isp; int ret = 0; int i; for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - if (!stream_env->pipes[i] || - !(force || stream_env->update_pipe[i])) + if (!stream_env->pipes[i]) continue; if (ia_css_pipe_destroy(stream_env->pipes[i]) != 0) { @@ -558,7 +545,7 @@ static int __destroy_stream_pipes(struct atomisp_sub_device *asd, return ret; } -static int __destroy_pipes(struct atomisp_sub_device *asd, bool force) +static int __destroy_pipes(struct atomisp_sub_device *asd) { struct atomisp_device *isp = asd->isp; int i; @@ -572,7 +559,7 @@ static int __destroy_pipes(struct atomisp_sub_device *asd, bool force) continue; } - ret = __destroy_stream_pipes(asd, &asd->stream_env[i], force); + ret = __destroy_stream_pipes(asd, &asd->stream_env[i]); if (ret) return ret; } @@ -582,8 +569,11 @@ static int __destroy_pipes(struct atomisp_sub_device *asd, bool force) void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd) { - __destroy_streams(asd, true); - __destroy_pipes(asd, true); + if (__destroy_streams(asd)) + dev_warn(asd->isp->dev, "destroy stream failed.\n"); + + if (__destroy_pipes(asd)) + dev_warn(asd->isp->dev, "destroy pipe failed.\n"); } static void __apply_additional_pipe_config( @@ -786,39 +776,32 @@ pipe_err: return -EINVAL; } -void atomisp_create_pipes_stream(struct atomisp_sub_device *asd) -{ - __create_pipes(asd); - __create_streams(asd); -} - -int atomisp_css_update_stream(struct atomisp_sub_device *asd) +int atomisp_create_pipes_stream(struct atomisp_sub_device *asd) { int ret; - struct atomisp_device *isp = asd->isp; - - if (__destroy_streams(asd, true)) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true)) - dev_warn(isp->dev, "destroy pipe failed.\n"); ret = __create_pipes(asd); if (ret) { - dev_err(isp->dev, "create pipe failed %d.\n", ret); - return -EIO; + dev_err(asd->isp->dev, "create pipe failed %d.\n", ret); + return ret; } ret = __create_streams(asd); if (ret) { - dev_warn(isp->dev, "create stream failed %d.\n", ret); - __destroy_pipes(asd, true); - return -EIO; + dev_warn(asd->isp->dev, "create stream failed %d.\n", ret); + __destroy_pipes(asd); + return ret; } return 0; } +int atomisp_css_update_stream(struct atomisp_sub_device *asd) +{ + atomisp_destroy_pipes_stream_force(asd); + return atomisp_create_pipes_stream(asd); +} + int atomisp_css_init(struct atomisp_device *isp) { unsigned int mmu_base_addr; @@ -1103,23 +1086,12 @@ int atomisp_css_start(struct atomisp_sub_device *asd, int ret = 0, i = 0; if (in_reset) { - if (__destroy_streams(asd, true)) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true)) - dev_warn(isp->dev, "destroy pipe failed.\n"); + ret = atomisp_css_update_stream(asd); + if (ret) + return ret; - if (__create_pipes(asd)) { - 
dev_err(isp->dev, "create pipe error.\n"); - return -EINVAL; - } - if (__create_streams(asd)) { - dev_err(isp->dev, "create stream error.\n"); - ret = -EINVAL; - goto stream_err; - } - /* in_reset == true, extension firmwares are reloaded after the recovery */ - atomisp_acc_load_extensions(asd); + /* Invalidate caches. FIXME: should flush only necessary buffers */ + wbinvd(); } /* @@ -1134,15 +1106,9 @@ int atomisp_css_start(struct atomisp_sub_device *asd, * recreated in the next stream on. */ if (!asd->stream_prepared) { - if (__create_pipes(asd)) { - dev_err(isp->dev, "create pipe error.\n"); - return -EINVAL; - } - if (__create_streams(asd)) { - dev_err(isp->dev, "create stream error.\n"); - ret = -EINVAL; - goto stream_err; - } + ret = atomisp_create_pipes_stream(asd); + if (ret) + return ret; } /* * SP can only be started one time @@ -1181,9 +1147,7 @@ int atomisp_css_start(struct atomisp_sub_device *asd, return 0; start_err: - __destroy_streams(asd, true); -stream_err: - __destroy_pipes(asd, true); + atomisp_destroy_pipes_stream_force(asd); /* css 2.0 API limitation: ia_css_stop_sp() could be only called after * destroy all pipes @@ -2088,13 +2052,8 @@ void atomisp_css_stop(struct atomisp_sub_device *asd, unsigned long irqflags; unsigned int i; - /* if is called in atomisp_reset(), force destroy stream */ - if (__destroy_streams(asd, true)) - dev_err(isp->dev, "destroy stream failed.\n"); - - /* if is called in atomisp_reset(), force destroy all pipes */ - if (__destroy_pipes(asd, true)) - dev_err(isp->dev, "destroy pipes failed.\n"); + /* if is called in atomisp_reset(), force destroy streams and pipes */ + atomisp_destroy_pipes_stream_force(asd); atomisp_init_raw_buffer_bitmap(asd); @@ -2634,27 +2593,15 @@ static int __get_frame_info(struct atomisp_sub_device *asd, struct ia_css_pipe_info p_info; /* FIXME! No need to destroy/recreate all streams */ - if (__destroy_streams(asd, true)) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true)) - dev_warn(isp->dev, "destroy pipe failed.\n"); - - if (__create_pipes(asd)) { - dev_err(isp->dev, "can't create pipes\n"); - return -EINVAL; - } - - if (__create_streams(asd)) { - dev_err(isp->dev, "can't create streams\n"); - goto stream_err; - } + ret = atomisp_css_update_stream(asd); + if (ret) + return ret; ret = ia_css_pipe_get_info(asd->stream_env[stream_index].pipes[pipe_id], &p_info); if (ret) { dev_err(isp->dev, "can't get info from pipe\n"); - goto stream_err; + goto get_info_err; } switch (type) { @@ -2685,8 +2632,8 @@ static int __get_frame_info(struct atomisp_sub_device *asd, return 0; -stream_err: - __destroy_pipes(asd, true); +get_info_err: + atomisp_destroy_pipes_stream_force(asd); return -EINVAL; } @@ -3824,30 +3771,6 @@ void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, return; } -void atomisp_css_acc_done(struct atomisp_sub_device *asd) -{ - complete(&asd->acc.acc_done); -} - -int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd) -{ - int ret = 0; - struct atomisp_device *isp = asd->isp; - - /* Unlock the isp mutex taken in IOCTL handler before sleeping! 
*/ - rt_mutex_unlock(&isp->mutex); - if (wait_for_completion_interruptible_timeout(&asd->acc.acc_done, - ATOMISP_ISP_TIMEOUT_DURATION) == 0) { - dev_err(isp->dev, "<%s: completion timeout\n", __func__); - ia_css_debug_dump_sp_sw_debug_info(); - ia_css_debug_dump_debug_info(__func__); - ret = -EIO; - } - rt_mutex_lock(&isp->mutex); - - return ret; -} - /* Set the ACC binary arguments */ int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw) { @@ -3866,204 +3789,6 @@ int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw) return 0; } -/* Load acc binary extension */ -int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - enum ia_css_pipe_id pipe_id, - unsigned int type) -{ - struct ia_css_fw_info **hd; - - fw->next = NULL; - hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[pipe_id].acc_extension); - while (*hd) - hd = &(*hd)->next; - *hd = fw; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[pipe_id] = true; - return 0; -} - -/* Unload acc binary extension */ -void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - enum ia_css_pipe_id pipe_id) -{ - struct ia_css_fw_info **hd; - - hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[pipe_id].acc_extension); - while (*hd && *hd != fw) - hd = &(*hd)->next; - if (!*hd) { - dev_err(asd->isp->dev, "did not find acc fw for removal\n"); - return; - } - *hd = fw->next; - fw->next = NULL; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[pipe_id] = true; -} - -int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_pipe_config *pipe_config; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - - if (stream_env->acc_stream) { - if (stream_env->acc_stream_state == CSS_STREAM_STARTED) { - if (ia_css_stream_stop(stream_env->acc_stream) - != 0) { - dev_err(isp->dev, "stop acc_stream failed.\n"); - return -EBUSY; - } - } - - if (ia_css_stream_destroy(stream_env->acc_stream) - != 0) { - dev_err(isp->dev, "destroy acc_stream failed.\n"); - return -EBUSY; - } - stream_env->acc_stream = NULL; - } - - pipe_config = &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]; - ia_css_pipe_config_defaults(pipe_config); - asd->acc.acc_stages = kzalloc(MAX_ACC_STAGES * - sizeof(void *), GFP_KERNEL); - if (!asd->acc.acc_stages) - return -ENOMEM; - pipe_config->acc_stages = asd->acc.acc_stages; - pipe_config->mode = IA_CSS_PIPE_MODE_ACC; - pipe_config->num_acc_stages = 0; - - /* - * We delay the ACC pipeline creation to atomisp_css_start_acc_pipe, - * because pipe configuration will soon be changed by - * atomisp_css_load_acc_binary() - */ - return 0; -} - -int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_pipe_config *pipe_config = - &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]; - - if (ia_css_pipe_create(pipe_config, - &stream_env->pipes[IA_CSS_PIPE_ID_ACC]) != 0) { - dev_err(isp->dev, "%s: ia_css_pipe_create failed\n", - __func__); - return -EBADE; - } - - memset(&stream_env->acc_stream_config, 0, - sizeof(struct ia_css_stream_config)); - if (ia_css_stream_create(&stream_env->acc_stream_config, 1, - &stream_env->pipes[IA_CSS_PIPE_ID_ACC], - &stream_env->acc_stream) != 0) { - dev_err(isp->dev, "%s: create acc_stream error.\n", __func__); 
- return -EINVAL; - } - stream_env->acc_stream_state = CSS_STREAM_CREATED; - - init_completion(&asd->acc.acc_done); - asd->acc.pipeline = stream_env->pipes[IA_CSS_PIPE_ID_ACC]; - - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false); - - if (ia_css_start_sp()) { - dev_err(isp->dev, "start sp error.\n"); - return -EIO; - } - - if (ia_css_stream_start(stream_env->acc_stream) - != 0) { - dev_err(isp->dev, "acc_stream start error.\n"); - return -EIO; - } - - stream_env->acc_stream_state = CSS_STREAM_STARTED; - return 0; -} - -int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - if (stream_env->acc_stream_state == CSS_STREAM_STARTED) { - ia_css_stream_stop(stream_env->acc_stream); - stream_env->acc_stream_state = CSS_STREAM_STOPPED; - } - return 0; -} - -void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - if (stream_env->acc_stream) { - if (ia_css_stream_destroy(stream_env->acc_stream) - != 0) - dev_warn(asd->isp->dev, - "destroy acc_stream failed.\n"); - stream_env->acc_stream = NULL; - } - - if (stream_env->pipes[IA_CSS_PIPE_ID_ACC]) { - if (ia_css_pipe_destroy(stream_env->pipes[IA_CSS_PIPE_ID_ACC]) - != 0) - dev_warn(asd->isp->dev, - "destroy ACC pipe failed.\n"); - stream_env->pipes[IA_CSS_PIPE_ID_ACC] = NULL; - stream_env->update_pipe[IA_CSS_PIPE_ID_ACC] = false; - ia_css_pipe_config_defaults( - &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]); - ia_css_pipe_extra_config_defaults( - &stream_env->pipe_extra_configs[IA_CSS_PIPE_ID_ACC]); - } - asd->acc.pipeline = NULL; - - /* css 2.0 API limitation: ia_css_stop_sp() could be only called after - * destroy all pipes - */ - ia_css_stop_sp(); - - kfree(asd->acc.acc_stages); - asd->acc.acc_stages = NULL; - - atomisp_freq_scaling(asd->isp, ATOMISP_DFS_MODE_LOW, false); -} - -int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd, - struct ia_css_fw_info *fw, - unsigned int index) -{ - struct ia_css_pipe_config *pipe_config = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[IA_CSS_PIPE_ID_ACC]; - - if (index >= MAX_ACC_STAGES) { - dev_dbg(asd->isp->dev, "%s: index(%d) out of range\n", - __func__, index); - return -ENOMEM; - } - - pipe_config->acc_stages[index] = fw; - pipe_config->num_acc_stages = index + 1; - pipe_config->acc_num_execs = 1; - - return 0; -} - static struct atomisp_sub_device *__get_atomisp_subdev( struct ia_css_pipe *css_pipe, struct atomisp_device *isp, @@ -4075,8 +3800,7 @@ static struct atomisp_sub_device *__get_atomisp_subdev( for (i = 0; i < isp->num_of_streams; i++) { asd = &isp->asd[i]; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED && - !asd->acc.pipeline) + if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED) continue; for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) { stream_env = &asd->stream_env[j]; @@ -4211,8 +3935,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, css_pipe_done[asd->index] = true; break; case IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE: - dev_dbg(isp->dev, "event: acc stage done"); - atomisp_acc_done(asd, current_event.event.fw_handle); + dev_warn(isp->dev, "unexpected event: acc stage done"); break; default: dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n", diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h index 86d3fbe01378..33821b51d90e 100644 --- 
a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h @@ -140,19 +140,6 @@ struct atomisp_calibration_group32 { compat_uptr_t calb_grp_values; }; -struct atomisp_acc_fw_load32 { - unsigned int size; - unsigned int fw_handle; - compat_uptr_t data; -}; - -struct atomisp_acc_fw_arg32 { - unsigned int fw_handle; - unsigned int index; - compat_uptr_t value; - compat_size_t size; -}; - struct v4l2_private_int_data32 { __u32 size; compat_uptr_t data; @@ -170,21 +157,6 @@ struct atomisp_shading_table32 { compat_uptr_t data[ATOMISP_NUM_SC_COLORS]; }; -struct atomisp_acc_map32 { - __u32 flags; /* Flags, see list below */ - __u32 length; /* Length of data in bytes */ - compat_uptr_t user_ptr; /* Pointer into user space */ - compat_ulong_t css_ptr; /* Pointer into CSS address space */ - __u32 reserved[4]; /* Set to zero */ -}; - -struct atomisp_acc_s_mapped_arg32 { - unsigned int fw_handle; - __u32 memory; /* one of enum atomisp_acc_memory */ - compat_size_t length; - compat_ulong_t css_ptr; -}; - struct atomisp_parameters32 { compat_uptr_t wb_config; /* White Balance config */ compat_uptr_t cc_config; /* Color Correction config */ @@ -265,15 +237,6 @@ struct atomisp_parameters32 { u32 per_frame_setting; }; -struct atomisp_acc_fw_load_to_pipe32 { - __u32 flags; /* Flags, see below for valid values */ - unsigned int fw_handle; /* Handle, filled by kernel. */ - __u32 size; /* Firmware binary size */ - compat_uptr_t data; /* Pointer to firmware */ - __u32 type; /* Binary type */ - __u32 reserved[3]; /* Set to zero */ -}; - struct atomisp_dvs_6axis_config32 { u32 exp_id; u32 width_y; @@ -323,15 +286,6 @@ struct atomisp_sensor_ae_bracketing_lut32 { #define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \ _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32) -#define ATOMISP_IOC_ACC_LOAD32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load32) - -#define ATOMISP_IOC_ACC_S_ARG32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg32) - -#define ATOMISP_IOC_ACC_DESTAB32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg32) - #define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32) @@ -341,18 +295,6 @@ struct atomisp_sensor_ae_bracketing_lut32 { #define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32) -#define ATOMISP_IOC_ACC_MAP32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32) - -#define ATOMISP_IOC_ACC_UNMAP32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32) - -#define ATOMISP_IOC_ACC_S_MAPPED_ARG32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg32) - -#define ATOMISP_IOC_ACC_LOAD_TO_PIPE32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe32) - #define ATOMISP_IOC_S_PARAMETERS32 \ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32) diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c index dcb571f515a7..3ddc935ec01d 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c +++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c @@ -45,10 +45,8 @@ struct _iunit_debug { #define OPTION_BIN_LIST BIT(0) #define OPTION_BIN_RUN BIT(1) -#define OPTION_MEM_STAT BIT(2) #define OPTION_VALID (OPTION_BIN_LIST \ - | OPTION_BIN_RUN \ - | OPTION_MEM_STAT) + | OPTION_BIN_RUN) static struct _iunit_debug iunit_debug = { 
.dbglvl = 0, @@ -81,9 +79,6 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp, goto opt_err; } } - - if (opt & OPTION_MEM_STAT) - hmm_show_mem_stat(__func__, __LINE__); } else { ret = -EINVAL; dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret); diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index be6a74d5ac19..77150e4ae144 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -38,8 +38,6 @@ #include "type_support.h" #include "device_access/device_access.h" -#include "atomisp_acc.h" - #define ISP_LEFT_PAD 128 /* equal to 2*NWAY */ /* @@ -865,12 +863,6 @@ dev_init: goto error; } - if (dypool_enable) { - ret = hmm_pool_register(dypool_pgnr, HMM_POOL_TYPE_DYNAMIC); - if (ret) - dev_err(isp->dev, "Failed to register dynamic memory pool.\n"); - } - /* Init ISP */ if (atomisp_css_init(isp)) { ret = -EINVAL; @@ -910,7 +902,6 @@ css_error: atomisp_css_uninit(isp); pm_runtime_put(vdev->v4l2_dev->dev); error: - hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC); rt_mutex_unlock(&isp->mutex); return ret; } @@ -1021,8 +1012,6 @@ subdev_uninit: if (atomisp_dev_users(isp)) goto done; - atomisp_acc_release(asd); - atomisp_destroy_pipes_stream_force(asd); atomisp_css_uninit(isp); @@ -1032,8 +1021,6 @@ subdev_uninit: isp->css_env.isp_css_fw.bytes = 0; } - hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC); - ret = v4l2_subdev_call(isp->flash, core, s_power, 0); if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) dev_warn(isp->dev, "Failed to power-off flash\n"); diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 7e47db82de07..bf527b366ab3 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -1284,7 +1284,7 @@ static int gmin_get_config_var(struct device *maindev, const struct dmi_system_id *id; struct device *dev = maindev; char var8[CFG_VAR_NAME_MAX]; - struct efivar_entry *ev; + efi_status_t status; int i, ret; /* For sensors, try first to use the _DSM table */ @@ -1326,24 +1326,11 @@ static int gmin_get_config_var(struct device *maindev, for (i = 0; i < sizeof(var8) && var8[i]; i++) var16[i] = var8[i]; - /* Not sure this API usage is kosher; efivar_entry_get()'s - * implementation simply uses VariableName and VendorGuid from - * the struct and ignores the rest, but it seems like there - * ought to be an "official" efivar_entry registered - * somewhere? 
- */ - ev = kzalloc(sizeof(*ev), GFP_KERNEL); - if (!ev) - return -ENOMEM; - memcpy(&ev->var.VariableName, var16, sizeof(var16)); - ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID; - ev->var.DataSize = *out_len; - - ret = efivar_entry_get(ev, &ev->var.Attributes, - &ev->var.DataSize, ev->var.Data); - if (ret == 0) { - memcpy(out, ev->var.Data, ev->var.DataSize); - *out_len = ev->var.DataSize; + status = EFI_UNSUPPORTED; + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL, + (unsigned long *)out_len, out); + if (status == EFI_SUCCESS) { dev_info(maindev, "found EFI entry for '%s'\n", var8); } else if (is_gmin) { dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8); @@ -1351,8 +1338,6 @@ static int gmin_get_config_var(struct device *maindev, dev_info(maindev, "Failed to find EFI variable %s\n", var8); } - kfree(ev); - return ret; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 8fd470efd658..459645c2e2a7 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -25,7 +25,6 @@ #include <media/v4l2-event.h> #include <media/videobuf-vmalloc.h> -#include "atomisp_acc.h" #include "atomisp_cmd.h" #include "atomisp_common.h" #include "atomisp_fops.h" @@ -625,17 +624,6 @@ unsigned int atomisp_streaming_count(struct atomisp_device *isp) return sum; } -unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp) -{ - unsigned int i; - - for (i = 0; i < isp->num_of_streams; i++) - if (isp->asd[i].acc.pipeline) - return 1; - - return 0; -} - /* * get input are used to get current primary/secondary camera */ @@ -1371,7 +1359,7 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) ret = ia_css_frame_map(&handle, &frame_info, (void __user *)buf->m.userptr, - 0, pgnr); + pgnr); if (ret) { dev_err(isp->dev, "Failed to map user buffer\n"); goto error; @@ -1913,11 +1901,8 @@ static int atomisp_streamon(struct file *file, void *fh, css_pipe_id = atomisp_get_css_pipe_id(asd); - ret = atomisp_acc_load_extensions(asd); - if (ret < 0) { - dev_err(isp->dev, "acc extension failed to load\n"); - goto out; - } + /* Invalidate caches. 
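The gmin_get_config_var() hunk above drops the efivar_entry plumbing in favour of calling the EFI runtime service directly, guarded by a runtime-support check. A condensed sketch of that pattern, assuming the caller already has the UTF-16 name and vendor GUID; read_cfg_var() is an illustrative name.

#include <linux/efi.h>

/* Illustrative only: read an EFI variable the way the patched code does. */
static efi_status_t read_cfg_var(efi_char16_t *name, efi_guid_t *guid,
				 void *buf, unsigned long *len)
{
	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
		return EFI_UNSUPPORTED;

	/* attributes pointer may be NULL when the caller does not need them */
	return efi.get_variable(name, guid, NULL, len, buf);
}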
FIXME: should flush only necessary buffers */ + wbinvd(); if (asd->params.css_update_params_needed) { atomisp_apply_css_parameters(asd, &asd->params.css_param); @@ -2154,7 +2139,6 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) video, s_stream, 0); rt_mutex_lock(&isp->mutex); - atomisp_acc_unload_extensions(asd); } spin_lock_irqsave(&isp->lock, flags); @@ -2283,8 +2267,17 @@ stopsensor: dev_err(isp->dev, "atomisp_reset"); atomisp_reset(isp); for (i = 0; i < isp->num_of_streams; i++) { - if (recreate_streams[i]) - atomisp_create_pipes_stream(&isp->asd[i]); + if (recreate_streams[i]) { + int ret2; + + ret2 = atomisp_create_pipes_stream(&isp->asd[i]); + if (ret2) { + dev_err(isp->dev, "%s error re-creating streams: %d\n", + __func__, ret2); + if (!ret) + ret = ret2; + } + } } isp->isp_timeout = false; } @@ -3118,38 +3111,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh, err = -EINVAL; break; - case ATOMISP_IOC_ACC_LOAD: - err = atomisp_acc_load(asd, arg); - break; - - case ATOMISP_IOC_ACC_LOAD_TO_PIPE: - err = atomisp_acc_load_to_pipe(asd, arg); - break; - - case ATOMISP_IOC_ACC_UNLOAD: - err = atomisp_acc_unload(asd, arg); - break; - - case ATOMISP_IOC_ACC_START: - err = atomisp_acc_start(asd, arg); - break; - - case ATOMISP_IOC_ACC_WAIT: - err = atomisp_acc_wait(asd, arg); - break; - - case ATOMISP_IOC_ACC_MAP: - err = atomisp_acc_map(asd, arg); - break; - - case ATOMISP_IOC_ACC_UNMAP: - err = atomisp_acc_unmap(asd, arg); - break; - - case ATOMISP_IOC_ACC_S_MAPPED_ARG: - err = atomisp_acc_s_mapped_arg(asd, arg); - break; - case ATOMISP_IOC_S_ISP_SHD_TAB: err = atomisp_set_shading_table(asd, arg); break; @@ -3198,12 +3159,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh, case ATOMISP_IOC_S_EXPOSURE_WINDOW: err = atomisp_s_ae_window(asd, arg); break; - case ATOMISP_IOC_S_ACC_STATE: - err = atomisp_acc_set_state(asd, arg); - break; - case ATOMISP_IOC_G_ACC_STATE: - err = atomisp_acc_get_state(asd, arg); - break; case ATOMISP_IOC_INJECT_A_FAKE_EVENT: err = atomisp_inject_a_fake_event(asd, arg); break; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h index 412bfcf33c0f..d85e0d697a4e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h @@ -57,7 +57,6 @@ extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops; unsigned int atomisp_streaming_count(struct atomisp_device *isp); -unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp); /* compat_ioctl for 32bit userland app and 64bit kernel */ long atomisp_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index 1807cfa786a7..394fe6959033 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -1081,9 +1081,6 @@ static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd, { pipe->asd = asd; pipe->isp = asd->isp; - INIT_LIST_HEAD(&asd->acc.fw); - INIT_LIST_HEAD(&asd->acc.memory_maps); - ida_init(&asd->acc.ida); } /* diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h index 7d731f1fee72..798a93793a9a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h @@ -322,16 +322,6 @@ struct atomisp_sub_device { struct 
v4l2_ctrl *disable_dz; - struct { - struct list_head fw; - struct list_head memory_maps; - struct ia_css_pipe *pipeline; - bool extension_mode; - struct ida ida; - struct completion acc_done; - void *acc_stages; - } acc; - struct atomisp_subdev_params params; struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM]; diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index 49ccfb1646da..643ba981601b 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -37,7 +37,6 @@ #include "atomisp_file.h" #include "atomisp_ioctl.h" #include "atomisp_internal.h" -#include "atomisp_acc.h" #include "atomisp-regs.h" #include "atomisp_dfs_tables.h" #include "atomisp_drvfs.h" @@ -59,23 +58,6 @@ static uint skip_fwload; module_param(skip_fwload, uint, 0644); MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load"); -/* set reserved memory pool size in page */ -static unsigned int repool_pgnr = 32768; -module_param(repool_pgnr, uint, 0644); -MODULE_PARM_DESC(repool_pgnr, - "Set the reserved memory pool size in page (default:32768)"); - -/* set dynamic memory pool size in page */ -unsigned int dypool_pgnr = UINT_MAX; -module_param(dypool_pgnr, uint, 0644); -MODULE_PARM_DESC(dypool_pgnr, - "Set the dynamic memory pool size in page (default: unlimited)"); - -bool dypool_enable = true; -module_param(dypool_enable, bool, 0644); -MODULE_PARM_DESC(dypool_enable, - "dynamic memory pool enable/disable (default:enabled)"); - /* memory optimization: deferred firmware loading */ bool defer_fw_load; module_param(defer_fw_load, bool, 0644); @@ -1770,13 +1752,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); - hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr); - err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED); - if (err) { - dev_err(&pdev->dev, "Failed to register reserved memory pool.\n"); - goto hmm_pool_fail; - } - /* Init ISP memory management */ hmm_init(); @@ -1813,12 +1788,9 @@ css_init_fail: devm_free_irq(&pdev->dev, pdev->irq, isp); request_irq_fail: hmm_cleanup(); - hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); -hmm_pool_fail: pm_runtime_get_noresume(&pdev->dev); destroy_workqueue(isp->wdt_work_queue); wdt_work_queue_fail: - atomisp_acc_cleanup(isp); atomisp_unregister_entities(isp); register_entities_fail: atomisp_uninitialize_modules(isp); @@ -1869,8 +1841,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev) atomisp_drvfs_exit(); - atomisp_acc_cleanup(isp); - ia_css_unload_firmware(); hmm_cleanup(); @@ -1885,8 +1855,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev) atomisp_file_input_cleanup(isp); release_firmware(isp->firmware); - - hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); } static const struct pci_device_id atomisp_pci_tbl[] = { diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h index ee861ddb8e92..5660bd4221be 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h @@ -25,7 +25,7 @@ * Simple queuing trace buffer for debug data * instantiatable in SP DMEM * - * The buffer has a remote and and a local store + * The buffer has a remote and a local store * which contain duplicate data (when in sync). 
* The buffers are automatically synched when the * user dequeues, or manualy using the synch function diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index c1cda16f2dc0..fc6cfe9f7744 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -28,7 +28,6 @@ #include <linux/sysfs.h> #include "hmm/hmm.h" -#include "hmm/hmm_pool.h" #include "hmm/hmm_bo.h" #include "atomisp_internal.h" @@ -37,11 +36,8 @@ #include "mmu/sh_mmu_mrfld.h" struct hmm_bo_device bo_device; -struct hmm_pool dynamic_pool; -struct hmm_pool reserved_pool; static ia_css_ptr dummy_ptr = mmgr_EXCEPTION; static bool hmm_initialized; -struct _hmm_mem_stat hmm_mem_stat; /* * p: private @@ -113,62 +109,13 @@ static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr, return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false); } -static ssize_t reserved_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t ret = 0; - - struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info; - unsigned long flags; - - if (!pinfo || !pinfo->initialized) - return 0; - - spin_lock_irqsave(&pinfo->list_lock, flags); - ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n", - pinfo->index, pinfo->pgnr); - spin_unlock_irqrestore(&pinfo->list_lock, flags); - - if (ret > 0) - ret++; /* Add trailing zero, not included by scnprintf */ - - return ret; -}; - -static ssize_t dynamic_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t ret = 0; - - struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info; - unsigned long flags; - - if (!pinfo || !pinfo->initialized) - return 0; - - spin_lock_irqsave(&pinfo->list_lock, flags); - ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n", - pinfo->pgnr, pinfo->pool_size); - spin_unlock_irqrestore(&pinfo->list_lock, flags); - - if (ret > 0) - ret++; /* Add trailing zero, not included by scnprintf */ - - return ret; -}; static DEVICE_ATTR_RO(active_bo); static DEVICE_ATTR_RO(free_bo); -static DEVICE_ATTR_RO(reserved_pool); -static DEVICE_ATTR_RO(dynamic_pool); static struct attribute *sysfs_attrs_ctrl[] = { &dev_attr_active_bo.attr, &dev_attr_free_bo.attr, - &dev_attr_reserved_pool.attr, - &dev_attr_dynamic_pool.attr, NULL }; @@ -194,7 +141,7 @@ int hmm_init(void) * at the beginning, to avoid hmm_alloc return 0 in the * further allocation. */ - dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, 0); + dummy_ptr = hmm_alloc(1); if (!ret) { ret = sysfs_create_group(&atomisp_dev->kobj, @@ -221,17 +168,12 @@ void hmm_cleanup(void) hmm_initialized = false; } -ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, - int from_highmem, const void __user *userptr, - const uint16_t attrs) +static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type, const void __user *userptr) { unsigned int pgnr; struct hmm_buffer_object *bo; - bool cached = attrs & ATOMISP_MAP_FLAG_CACHED; int ret; - WARN_ON(attrs & ATOMISP_MAP_FLAG_CONTIGUOUS); - /* * Check if we are initialized. 
In the ideal world we wouldn't need * this but we can tackle it once the driver is a lot cleaner @@ -250,7 +192,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, } /* Allocate pages for memory */ - ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached); + ret = hmm_bo_alloc_pages(bo, type, userptr); if (ret) { dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n"); goto alloc_page_err; @@ -263,14 +205,9 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, goto bind_err; } - hmm_mem_stat.tol_cnt += pgnr; - - if (attrs & ATOMISP_MAP_FLAG_CLEARED) - hmm_set(bo->start, 0, bytes); - dev_dbg(atomisp_dev, - "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n", - __func__, bo->start, bytes, type, from_highmem, userptr, cached); + "%s: pages: 0x%08x (%zu bytes), type: %d, user ptr %p\n", + __func__, bo->start, bytes, type, userptr); return bo->start; @@ -282,6 +219,16 @@ create_bo_err: return 0; } +ia_css_ptr hmm_alloc(size_t bytes) +{ + return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL); +} + +ia_css_ptr hmm_create_from_userdata(size_t bytes, const void __user *userptr) +{ + return __hmm_alloc(bytes, HMM_BO_USER, userptr); +} + void hmm_free(ia_css_ptr virt) { struct hmm_buffer_object *bo; @@ -300,8 +247,6 @@ void hmm_free(ia_css_ptr virt) return; } - hmm_mem_stat.tol_cnt -= bo->pgnr; - hmm_bo_unbind(bo); hmm_bo_free_pages(bo); hmm_bo_unref(bo); @@ -350,7 +295,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); - src = (char *)kmap(bo->page_obj[idx].page) + offset; + src = (char *)kmap_local_page(bo->pages[idx]) + offset; if ((bytes + offset) >= PAGE_SIZE) { len = PAGE_SIZE - offset; @@ -369,7 +314,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, clflush_cache_range(src, len); - kunmap(bo->page_obj[idx].page); + kunmap_local(src); } return 0; @@ -482,10 +427,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); - if (in_atomic()) - des = (char *)kmap_atomic(bo->page_obj[idx].page); - else - des = (char *)kmap(bo->page_obj[idx].page); + des = (char *)kmap_local_page(bo->pages[idx]); if (!des) { dev_err(atomisp_dev, @@ -512,14 +454,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) clflush_cache_range(des, len); - if (in_atomic()) - /* - * Note: kunmap_atomic requires return addr from - * kmap_atomic, not the page. 
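The load_and_flush_by_kmap(), hmm_store() and hmm_set() hunks all follow the same conversion: map with kmap_local_page(), operate, flush, then hand the mapped address (not the page) back to kunmap_local(). A condensed sketch of the pattern; copy_to_bo_page() is an illustrative helper, not part of the patch.

#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative only: write one chunk into a buffer-object page. */
static void copy_to_bo_page(struct page *page, unsigned int offset,
			    const void *data, unsigned int len)
{
	char *des = (char *)kmap_local_page(page) + offset;

	memcpy(des, data, len);
	clflush_cache_range(des, len);

	/* kunmap_local() accepts any address inside the local mapping */
	kunmap_local(des);
}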
See linux/highmem.h - */ - kunmap_atomic(des - offset); - else - kunmap(bo->page_obj[idx].page); + kunmap_local(des); } return 0; @@ -563,7 +498,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); - des = (char *)kmap(bo->page_obj[idx].page) + offset; + des = (char *)kmap_local_page(bo->pages[idx]) + offset; if ((bytes + offset) >= PAGE_SIZE) { len = PAGE_SIZE - offset; @@ -579,7 +514,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) clflush_cache_range(des, len); - kunmap(bo->page_obj[idx].page); + kunmap_local(des); } return 0; @@ -602,7 +537,7 @@ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt) idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); - return page_to_phys(bo->page_obj[idx].page) + offset; + return page_to_phys(bo->pages[idx]) + offset; } int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt) @@ -671,96 +606,3 @@ void hmm_vunmap(ia_css_ptr virt) hmm_bo_vunmap(bo); } - -int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type) -{ -#if 0 // Just use the "normal" pool - switch (pool_type) { - case HMM_POOL_TYPE_RESERVED: - reserved_pool.pops = &reserved_pops; - return reserved_pool.pops->pool_init(&reserved_pool.pool_info, - pool_size); - case HMM_POOL_TYPE_DYNAMIC: - dynamic_pool.pops = &dynamic_pops; - return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info, - pool_size); - default: - dev_err(atomisp_dev, "invalid pool type.\n"); - return -EINVAL; - } -#else - return 0; -#endif -} - -void hmm_pool_unregister(enum hmm_pool_type pool_type) -{ -#if 0 // Just use the "normal" pool - switch (pool_type) { - case HMM_POOL_TYPE_RESERVED: - if (reserved_pool.pops && reserved_pool.pops->pool_exit) - reserved_pool.pops->pool_exit(&reserved_pool.pool_info); - break; - case HMM_POOL_TYPE_DYNAMIC: - if (dynamic_pool.pops && dynamic_pool.pops->pool_exit) - dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info); - break; - default: - dev_err(atomisp_dev, "invalid pool type.\n"); - break; - } -#endif - - return; -} - -void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached) -{ - return hmm_vmap(ptr, cached); - /* vmunmap will be done in hmm_bo_release() */ -} - -ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr) -{ - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_vmap_start(&bo_device, ptr); - if (bo) - return bo->start; - - dev_err(atomisp_dev, - "can not find buffer object whose kernel virtual address is %p\n", - ptr); - return 0; -} - -void hmm_show_mem_stat(const char *func, const int line) -{ - pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", - hmm_mem_stat.tol_cnt, - hmm_mem_stat.usr_size, hmm_mem_stat.res_size, - hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, - hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); -} - -void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr) -{ - hmm_mem_stat.res_size = res_pgnr; - /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */ - if (hmm_mem_stat.res_size == 0) { - hmm_mem_stat.res_size = -1; - hmm_mem_stat.res_cnt = -1; - } - - /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. 
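After this change the allocator has exactly two entry points, hmm_alloc() for ISP-private memory and hmm_create_from_userdata() for user pages; the cached/highmem/cleared attributes are gone, so a caller that needs zeroed memory presumably clears it explicitly with hmm_set(). A minimal caller sketch; alloc_scratch() and the error handling are illustrative only.

#include <linux/errno.h>
#include "hmm/hmm.h"

/* Illustrative only: allocate, clear and release an ISP buffer. */
static int alloc_scratch(size_t bytes)
{
	ia_css_ptr addr = hmm_alloc(bytes);

	if (!addr)			/* hmm_alloc() returns 0 on failure */
		return -ENOMEM;

	hmm_set(addr, 0, bytes);	/* ATOMISP_MAP_FLAG_CLEARED no longer exists */

	/* ... hand addr to the ISP ... */

	hmm_free(addr);
	return 0;
}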
*/ - if (!dyc_en) { - hmm_mem_stat.dyc_size = -1; - hmm_mem_stat.dyc_thr = -1; - } else { - hmm_mem_stat.dyc_size = 0; - hmm_mem_stat.dyc_thr = dyc_pgnr; - } - hmm_mem_stat.usr_size = 0; - hmm_mem_stat.sys_size = 0; - hmm_mem_stat.tol_cnt = 0; -} diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index 0168f9839c90..f50494123f03 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -42,7 +42,6 @@ #include "atomisp_internal.h" #include "hmm/hmm_common.h" -#include "hmm/hmm_pool.h" #include "hmm/hmm_bo.h" static unsigned int order_to_nr(unsigned int order) @@ -627,75 +626,31 @@ found: } static void free_private_bo_pages(struct hmm_buffer_object *bo, - struct hmm_pool *dypool, - struct hmm_pool *repool, int free_pgnr) { int i, ret; for (i = 0; i < free_pgnr; i++) { - switch (bo->page_obj[i].type) { - case HMM_PAGE_TYPE_RESERVED: - if (repool->pops - && repool->pops->pool_free_pages) { - repool->pops->pool_free_pages(repool->pool_info, - &bo->page_obj[i]); - hmm_mem_stat.res_cnt--; - } - break; - /* - * HMM_PAGE_TYPE_GENERAL indicates that pages are from system - * memory, so when free them, they should be put into dynamic - * pool. - */ - case HMM_PAGE_TYPE_DYNAMIC: - case HMM_PAGE_TYPE_GENERAL: - if (dypool->pops - && dypool->pops->pool_inited - && dypool->pops->pool_inited(dypool->pool_info)) { - if (dypool->pops->pool_free_pages) - dypool->pops->pool_free_pages( - dypool->pool_info, - &bo->page_obj[i]); - break; - } - - fallthrough; - + ret = set_pages_wb(bo->pages[i], 1); + if (ret) + dev_err(atomisp_dev, + "set page to WB err ...ret = %d\n", + ret); /* - * if dynamic memory pool doesn't exist, need to free - * pages to system directly. - */ - default: - ret = set_pages_wb(bo->page_obj[i].page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret = %d\n", - ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid,it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(bo->page_obj[i].page, 0); - hmm_mem_stat.sys_size--; - } - break; + W/A: set_pages_wb seldom return value = -EFAULT + indicate that address of page is not in valid + range(0xffff880000000000~0xffffc7ffffffffff) + then, _free_pages would panic; Do not know why page + address be valid,it maybe memory corruption by lowmemory + */ + if (!ret) { + __free_pages(bo->pages[i], 0); } } - - return; } /*Allocate pages which will be used only by ISP*/ -static int alloc_private_pages(struct hmm_buffer_object *bo, - int from_highmem, - bool cached, - struct hmm_pool *dypool, - struct hmm_pool *repool) +static int alloc_private_pages(struct hmm_buffer_object *bo) { int ret; unsigned int pgnr, order, blk_pgnr, alloc_pgnr; @@ -706,50 +661,11 @@ static int alloc_private_pages(struct hmm_buffer_object *bo, bool reduce_order = false; bool lack_mem = true; - if (from_highmem) - gfp |= __GFP_HIGHMEM; - pgnr = bo->pgnr; - bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object), - GFP_KERNEL); - if (unlikely(!bo->page_obj)) - return -ENOMEM; - i = 0; alloc_pgnr = 0; - /* - * get physical pages from dynamic pages pool. 
- */ - if (dypool->pops && dypool->pops->pool_alloc_pages) { - alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info, - bo->page_obj, pgnr, - cached); - hmm_mem_stat.dyc_size -= alloc_pgnr; - - if (alloc_pgnr == pgnr) - return 0; - } - - pgnr -= alloc_pgnr; - i += alloc_pgnr; - - /* - * get physical pages from reserved pages pool for atomisp. - */ - if (repool->pops && repool->pops->pool_alloc_pages) { - alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info, - &bo->page_obj[i], pgnr, - cached); - hmm_mem_stat.res_cnt += alloc_pgnr; - if (alloc_pgnr == pgnr) - return 0; - } - - pgnr -= alloc_pgnr; - i += alloc_pgnr; - while (pgnr) { order = nr_to_order_bottom(pgnr); /* @@ -804,28 +720,24 @@ retry: } else { blk_pgnr = order_to_nr(order); - if (!cached) { - /* - * set memory to uncacheable -- UC_MINUS - */ - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set page uncacheablefailed.\n"); + /* + * set memory to uncacheable -- UC_MINUS + */ + ret = set_pages_uc(pages, blk_pgnr); + if (ret) { + dev_err(atomisp_dev, + "set page uncacheablefailed.\n"); - __free_pages(pages, order); + __free_pages(pages, order); - goto cleanup; - } + goto cleanup; } - for (j = 0; j < blk_pgnr; j++) { - bo->page_obj[i].page = pages + j; - bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL; + for (j = 0; j < blk_pgnr; j++, i++) { + bo->pages[i] = pages + j; } pgnr -= blk_pgnr; - hmm_mem_stat.sys_size += blk_pgnr; /* * if order is not reduced this time, clear @@ -841,60 +753,31 @@ retry: return 0; cleanup: alloc_pgnr = i; - free_private_bo_pages(bo, dypool, repool, alloc_pgnr); - - kfree(bo->page_obj); - + free_private_bo_pages(bo, alloc_pgnr); return -ENOMEM; } -static void free_private_pages(struct hmm_buffer_object *bo, - struct hmm_pool *dypool, - struct hmm_pool *repool) -{ - free_private_bo_pages(bo, dypool, repool, bo->pgnr); - - kfree(bo->page_obj); -} - static void free_user_pages(struct hmm_buffer_object *bo, unsigned int page_nr) { int i; - hmm_mem_stat.usr_size -= bo->pgnr; - if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) { unpin_user_pages(bo->pages, page_nr); } else { for (i = 0; i < page_nr; i++) put_page(bo->pages[i]); } - kfree(bo->pages); - kfree(bo->page_obj); } /* * Convert user space virtual address into pages list */ static int alloc_user_pages(struct hmm_buffer_object *bo, - const void __user *userptr, bool cached) + const void __user *userptr) { int page_nr; - int i; struct vm_area_struct *vma; - struct page **pages; - - pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); - if (unlikely(!pages)) - return -ENOMEM; - - bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object), - GFP_KERNEL); - if (unlikely(!bo->page_obj)) { - kfree(pages); - return -ENOMEM; - } mutex_unlock(&bo->mutex); mmap_read_lock(current->mm); @@ -902,8 +785,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, mmap_read_unlock(current->mm); if (!vma) { dev_err(atomisp_dev, "find_vma failed\n"); - kfree(bo->page_obj); - kfree(pages); mutex_lock(&bo->mutex); return -EFAULT; } @@ -915,18 +796,16 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, userptr = untagged_addr(userptr); - bo->pages = pages; - if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr, FOLL_LONGTERM | FOLL_WRITE, - pages, NULL); + bo->pages, NULL); bo->mem_type = HMM_BO_MEM_TYPE_PFN; } else { /*Handle frame buffer allocated in user space*/ mutex_unlock(&bo->mutex); page_nr = get_user_pages_fast((unsigned long)userptr, - 
(int)(bo->pgnr), 1, pages); + (int)(bo->pgnr), 1, bo->pages); mutex_lock(&bo->mutex); bo->mem_type = HMM_BO_MEM_TYPE_USER; } @@ -936,8 +815,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, bo->pgnr, bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr); - hmm_mem_stat.usr_size += bo->pgnr; - /* can be written by caller, not forced */ if (page_nr != bo->pgnr) { dev_err(atomisp_dev, @@ -948,11 +825,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, goto out_of_mem; } - for (i = 0; i < bo->pgnr; i++) { - bo->page_obj[i].page = pages[i]; - bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL; - } - return 0; out_of_mem: @@ -966,20 +838,14 @@ out_of_mem: * allocate/free physical pages for the bo. * * type indicate where are the pages from. currently we have 3 types - * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE. - * - * from_highmem is only valid when type is HMM_BO_PRIVATE, it will - * try to alloc memory from highmem if from_highmem is set. + * of memory: HMM_BO_PRIVATE, HMM_BO_USER. * * userptr is only valid when type is HMM_BO_USER, it indicates * the start address from user space task. - * - * from_highmem and userptr will both be ignored when type is - * HMM_BO_SHARE. */ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, - enum hmm_bo_type type, int from_highmem, - const void __user *userptr, bool cached) + enum hmm_bo_type type, + const void __user *userptr) { int ret = -EINVAL; @@ -988,15 +854,20 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, mutex_lock(&bo->mutex); check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); + bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); + if (unlikely(!bo->pages)) { + ret = -ENOMEM; + goto alloc_err; + } + /* * TO DO: * add HMM_BO_USER type */ if (type == HMM_BO_PRIVATE) { - ret = alloc_private_pages(bo, from_highmem, - cached, &dynamic_pool, &reserved_pool); + ret = alloc_private_pages(bo); } else if (type == HMM_BO_USER) { - ret = alloc_user_pages(bo, userptr, cached); + ret = alloc_user_pages(bo, userptr); } else { dev_err(atomisp_dev, "invalid buffer type.\n"); ret = -EINVAL; @@ -1013,6 +884,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, return 0; alloc_err: + kfree(bo->pages); mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "alloc pages err...\n"); return ret; @@ -1038,11 +910,13 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo) bo->status &= (~HMM_BO_PAGE_ALLOCED); if (bo->type == HMM_BO_PRIVATE) - free_private_pages(bo, &dynamic_pool, &reserved_pool); + free_private_bo_pages(bo, bo->pgnr); else if (bo->type == HMM_BO_USER) free_user_pages(bo, bo->pgnr); else dev_err(atomisp_dev, "invalid buffer type.\n"); + + kfree(bo->pages); mutex_unlock(&bo->mutex); return; @@ -1061,32 +935,6 @@ int hmm_bo_page_allocated(struct hmm_buffer_object *bo) } /* - * get physical page info of the bo. - */ -int hmm_bo_get_page_info(struct hmm_buffer_object *bo, - struct hmm_page_object **page_obj, int *pgnr) -{ - check_bo_null_return(bo, -EINVAL); - - mutex_lock(&bo->mutex); - - check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - - *page_obj = bo->page_obj; - *pgnr = bo->pgnr; - - mutex_unlock(&bo->mutex); - - return 0; - -status_err: - dev_err(atomisp_dev, - "buffer object not page allocated yet.\n"); - mutex_unlock(&bo->mutex); - return -EINVAL; -} - -/* * bind the physical pages to a virtual address space. 
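With the page_obj array gone, a buffer object now carries a plain struct page *pages[] that bind, vmap and mmap index directly, and hmm_bo_alloc_pages() allocates that array itself. The private-allocation path reduced to its essentials; back_and_bind() is an illustrative helper and assumes the bo was already created and sized by the usual hmm_bo machinery.

#include "hmm/hmm_bo.h"

/* Illustrative only: back a buffer object with ISP-private pages and map it. */
static int back_and_bind(struct hmm_buffer_object *bo)
{
	int ret;

	/* fills bo->pages[]; the userptr argument is only used for HMM_BO_USER */
	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, NULL);
	if (ret)
		return ret;

	/* maps page_to_phys(bo->pages[i]) into the ISP MMU */
	ret = hmm_bo_bind(bo);
	if (ret) {
		hmm_bo_free_pages(bo);
		return ret;
	}

	return 0;
}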
*/ int hmm_bo_bind(struct hmm_buffer_object *bo) @@ -1113,7 +961,7 @@ int hmm_bo_bind(struct hmm_buffer_object *bo) for (i = 0; i < bo->pgnr; i++) { ret = isp_mmu_map(&bdev->mmu, virt, - page_to_phys(bo->page_obj[i].page), 1); + page_to_phys(bo->pages[i]), 1); if (ret) goto map_err; virt += (1 << PAGE_SHIFT); @@ -1227,9 +1075,6 @@ int hmm_bo_binded(struct hmm_buffer_object *bo) void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached) { - struct page **pages; - int i; - check_bo_null_return(bo, NULL); mutex_lock(&bo->mutex); @@ -1246,27 +1091,15 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached) bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); } - pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL); - if (unlikely(!pages)) { - mutex_unlock(&bo->mutex); - return NULL; - } - - for (i = 0; i < bo->pgnr; i++) - pages[i] = bo->page_obj[i].page; - - bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP, + bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP, cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE); if (unlikely(!bo->vmap_addr)) { - kfree(pages); mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "vmap failed...\n"); return NULL; } bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED); - kfree(pages); - mutex_unlock(&bo->mutex); return bo->vmap_addr; } @@ -1396,7 +1229,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo) virt = vma->vm_start; for (i = 0; i < pgnr; i++) { - pfn = page_to_pfn(bo->page_obj[i].page); + pfn = page_to_pfn(bo->pages[i]); if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) { dev_warn(atomisp_dev, "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n", diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c deleted file mode 100644 index eaf97e5f3b68..000000000000 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c +++ /dev/null @@ -1,234 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * This file contains functions for dynamic memory pool management - */ -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/mm.h> - -#include <asm/set_memory.h> - -#include "atomisp_internal.h" - -#include "hmm/hmm_pool.h" - -/* - * dynamic memory pool ops. 
- */ -static unsigned int get_pages_from_dynamic_pool(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached) -{ - struct hmm_page *hmm_page; - unsigned long flags; - unsigned int i = 0; - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return 0; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (dypool_info->initialized) { - while (!list_empty(&dypool_info->pages_list)) { - hmm_page = list_entry(dypool_info->pages_list.next, - struct hmm_page, list); - - list_del(&hmm_page->list); - dypool_info->pgnr--; - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - page_obj[i].page = hmm_page->page; - page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC; - kmem_cache_free(dypool_info->pgptr_cache, hmm_page); - - if (i == size) - return i; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - } - } - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - return i; -} - -static void free_pages_to_dynamic_pool(void *pool, - struct hmm_page_object *page_obj) -{ - struct hmm_page *hmm_page; - unsigned long flags; - int ret; - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (!dypool_info->initialized) { - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - return; - } - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - if (page_obj->type == HMM_PAGE_TYPE_RESERVED) - return; - - if (dypool_info->pgnr >= dypool_info->pool_size) { - /* free page directly back to system */ - ret = set_pages_wb(page_obj->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret=%d\n", ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid, it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(page_obj->page, 0); - hmm_mem_stat.sys_size--; - } - return; - } - hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache, - GFP_KERNEL); - if (!hmm_page) { - /* free page directly */ - ret = set_pages_wb(page_obj->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret=%d\n", ret); - if (!ret) { - __free_pages(page_obj->page, 0); - hmm_mem_stat.sys_size--; - } - return; - } - - hmm_page->page = page_obj->page; - - /* - * add to pages_list of pages_pool - */ - spin_lock_irqsave(&dypool_info->list_lock, flags); - list_add_tail(&hmm_page->list, &dypool_info->pages_list); - dypool_info->pgnr++; - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - hmm_mem_stat.dyc_size++; -} - -static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size) -{ - struct hmm_dynamic_pool_info *dypool_info; - - if (pool_size == 0) - return 0; - - dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info), - GFP_KERNEL); - if (unlikely(!dypool_info)) - return -ENOMEM; - - dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache", - sizeof(struct hmm_page), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!dypool_info->pgptr_cache) { - kfree(dypool_info); - return -ENOMEM; - } - - INIT_LIST_HEAD(&dypool_info->pages_list); - spin_lock_init(&dypool_info->list_lock); - dypool_info->initialized = true; - dypool_info->pool_size = pool_size; - dypool_info->pgnr = 0; - - *pool = dypool_info; - - return 0; -} - -static void hmm_dynamic_pool_exit(void **pool) -{ - struct hmm_dynamic_pool_info *dypool_info = *pool; - struct hmm_page *hmm_page; - unsigned long flags; - int 
ret; - - if (!dypool_info) - return; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (!dypool_info->initialized) { - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - return; - } - dypool_info->initialized = false; - - while (!list_empty(&dypool_info->pages_list)) { - hmm_page = list_entry(dypool_info->pages_list.next, - struct hmm_page, list); - - list_del(&hmm_page->list); - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - /* can cause thread sleep, so cannot be put into spin_lock */ - ret = set_pages_wb(hmm_page->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err...ret=%d\n", ret); - if (!ret) { - __free_pages(hmm_page->page, 0); - hmm_mem_stat.dyc_size--; - hmm_mem_stat.sys_size--; - } - kmem_cache_free(dypool_info->pgptr_cache, hmm_page); - spin_lock_irqsave(&dypool_info->list_lock, flags); - } - - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - kmem_cache_destroy(dypool_info->pgptr_cache); - - kfree(dypool_info); - - *pool = NULL; -} - -static int hmm_dynamic_pool_inited(void *pool) -{ - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return 0; - - return dypool_info->initialized; -} - -struct hmm_pool_ops dynamic_pops = { - .pool_init = hmm_dynamic_pool_init, - .pool_exit = hmm_dynamic_pool_exit, - .pool_alloc_pages = get_pages_from_dynamic_pool, - .pool_free_pages = free_pages_to_dynamic_pool, - .pool_inited = hmm_dynamic_pool_inited, -}; diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c deleted file mode 100644 index 57525fece921..000000000000 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * This file contains functions for reserved memory pool management - */ -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/mm.h> - -#include <asm/set_memory.h> - -#include "atomisp_internal.h" -#include "hmm/hmm_pool.h" - -/* - * reserved memory pool ops. 
- */ -static unsigned int get_pages_from_reserved_pool(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached) -{ - unsigned long flags; - unsigned int i = 0; - unsigned int repool_pgnr; - int j; - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return 0; - - spin_lock_irqsave(&repool_info->list_lock, flags); - if (repool_info->initialized) { - repool_pgnr = repool_info->index; - - for (j = repool_pgnr - 1; j >= 0; j--) { - page_obj[i].page = repool_info->pages[j]; - page_obj[i].type = HMM_PAGE_TYPE_RESERVED; - i++; - repool_info->index--; - if (i == size) - break; - } - } - spin_unlock_irqrestore(&repool_info->list_lock, flags); - return i; -} - -static void free_pages_to_reserved_pool(void *pool, - struct hmm_page_object *page_obj) -{ - unsigned long flags; - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return; - - spin_lock_irqsave(&repool_info->list_lock, flags); - - if (repool_info->initialized && - repool_info->index < repool_info->pgnr && - page_obj->type == HMM_PAGE_TYPE_RESERVED) { - repool_info->pages[repool_info->index++] = page_obj->page; - } - - spin_unlock_irqrestore(&repool_info->list_lock, flags); -} - -static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info, - unsigned int pool_size) -{ - struct hmm_reserved_pool_info *pool_info; - - pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info), - GFP_KERNEL); - if (unlikely(!pool_info)) - return -ENOMEM; - - pool_info->pages = kmalloc(sizeof(struct page *) * pool_size, - GFP_KERNEL); - if (unlikely(!pool_info->pages)) { - kfree(pool_info); - return -ENOMEM; - } - - pool_info->index = 0; - pool_info->pgnr = 0; - spin_lock_init(&pool_info->list_lock); - pool_info->initialized = true; - - *repool_info = pool_info; - - return 0; -} - -static int hmm_reserved_pool_init(void **pool, unsigned int pool_size) -{ - int ret; - unsigned int blk_pgnr; - unsigned int pgnr = pool_size; - unsigned int order = 0; - unsigned int i = 0; - int fail_number = 0; - struct page *pages; - int j; - struct hmm_reserved_pool_info *repool_info; - - if (pool_size == 0) - return 0; - - ret = hmm_reserved_pool_setup(&repool_info, pool_size); - if (ret) { - dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n"); - return ret; - } - - pgnr = pool_size; - - i = 0; - order = MAX_ORDER; - - while (pgnr) { - blk_pgnr = 1U << order; - while (blk_pgnr > pgnr) { - order--; - blk_pgnr >>= 1U; - } - BUG_ON(order > MAX_ORDER); - - pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order); - if (unlikely(!pages)) { - if (order == 0) { - fail_number++; - dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n", - __func__, fail_number); - /* if fail five times, will goto end */ - - /* FIXME: whether is the mechanism is ok? 
*/ - if (fail_number == ALLOC_PAGE_FAIL_NUM) - goto end; - } else { - order--; - } - } else { - blk_pgnr = 1U << order; - - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set pages uncached failed\n"); - __free_pages(pages, order); - goto end; - } - - for (j = 0; j < blk_pgnr; j++) - repool_info->pages[i++] = pages + j; - - repool_info->index += blk_pgnr; - repool_info->pgnr += blk_pgnr; - - pgnr -= blk_pgnr; - - fail_number = 0; - } - } - -end: - repool_info->initialized = true; - - *pool = repool_info; - - dev_info(atomisp_dev, - "hmm_reserved_pool init successfully,hmm_reserved_pool is with %d pages.\n", - repool_info->pgnr); - return 0; -} - -static void hmm_reserved_pool_exit(void **pool) -{ - unsigned long flags; - int i, ret; - unsigned int pgnr; - struct hmm_reserved_pool_info *repool_info = *pool; - - if (!repool_info) - return; - - spin_lock_irqsave(&repool_info->list_lock, flags); - if (!repool_info->initialized) { - spin_unlock_irqrestore(&repool_info->list_lock, flags); - return; - } - pgnr = repool_info->pgnr; - repool_info->index = 0; - repool_info->pgnr = 0; - repool_info->initialized = false; - spin_unlock_irqrestore(&repool_info->list_lock, flags); - - for (i = 0; i < pgnr; i++) { - ret = set_pages_wb(repool_info->pages[i], 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err...ret=%d\n", ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why - page address be valid, it maybe memory corruption by lowmemory - */ - if (!ret) - __free_pages(repool_info->pages[i], 0); - } - - kfree(repool_info->pages); - kfree(repool_info); - - *pool = NULL; -} - -static int hmm_reserved_pool_inited(void *pool) -{ - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return 0; - - return repool_info->initialized; -} - -struct hmm_pool_ops reserved_pops = { - .pool_init = hmm_reserved_pool_init, - .pool_exit = hmm_reserved_pool_exit, - .pool_alloc_pages = get_pages_from_reserved_pool, - .pool_free_pages = free_pages_to_reserved_pool, - .pool_inited = hmm_reserved_pool_inited, -}; diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h index 96c86f0dc81c..514d933f934d 100644 --- a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h +++ b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h @@ -169,7 +169,6 @@ struct ia_css_frame { /** exposure id, see ia_css_event_public.h for more detail */ u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */ bool valid; /** First video output frame is not valid */ - bool contiguous; /** memory is allocated physically contiguously */ union { unsigned int _initialisation_dummy; struct ia_css_frame_plane raw; @@ -245,44 +244,6 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame, void ia_css_frame_free(struct ia_css_frame *frame); -/* @brief Allocate a contiguous CSS frame structure - * - * @param frame The allocated frame. - * @param width The width (in pixels) of the frame. - * @param height The height (in lines) of the frame. - * @param format The frame format. - * @param stride The padded stride, in pixels. - * @param raw_bit_depth The raw bit depth, in bits. - * @return The error code. - * - * Contiguous frame allocation, only for FPGA display driver which needs - * physically contiguous memory. - * Deprecated. 
- */ -int -ia_css_frame_allocate_contiguous(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int stride, - unsigned int raw_bit_depth); - -/* @brief Allocate a contiguous CSS frame from a frame info structure. - * - * @param frame The allocated frame. - * @param[in] info The frame info structure. - * @return The error code. - * - * Allocate a frame using the resolution and format from a frame info struct. - * This is a convenience function, implemented on top of - * ia_css_frame_allocate_contiguous(). - * Only for FPGA display driver which needs physically contiguous memory. - * Deprecated. - */ -int -ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info); - /* @brief Allocate a CSS frame structure using a frame info structure. * * @param frame The allocated frame. @@ -334,7 +295,6 @@ int ia_css_frame_map(struct ia_css_frame **frame, const struct ia_css_frame_info *info, const void __user *data, - u16 attribute, unsigned int pgnr); /* @brief Unmap a CSS frame structure. diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c index 13caa55fd51a..bf0a768f8fe1 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c @@ -331,7 +331,7 @@ ia_css_isp_dvs_statistics_allocate( HIVE_ISP_DDR_WORD_BYTES); me->size = hor_size + ver_size; - me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0); + me->data_ptr = hmm_alloc(me->size); if (me->data_ptr == mmgr_NULL) goto err; me->hor_size = hor_size; diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c index f608740e8340..c13de289a3db 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c @@ -294,7 +294,7 @@ ia_css_isp_dvs2_statistics_allocate( * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES; me->size = 2 * size; - me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0); + me->data_ptr = hmm_alloc(me->size); if (me->data_ptr == mmgr_NULL) goto err; me->hor_proj = me->data_ptr; diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h index bfe4f5976771..73432dc35ae3 100644 --- a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h +++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h @@ -145,12 +145,6 @@ more details. #define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2) -#define RAW_BUF_STRIDE \ - (BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \ - ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE + _ISP_EXTRA_PADDING_VECS : \ - !ENABLE_CONTINUOUS ? 
MAX_VECTORS_PER_INPUT_LINE : \ - MAX_VECTORS_PER_INPUT_CHUNK) - /* [isp vmem] table size[vectors] per line per color (GR,R,B,GB), multiples of NWAY */ #define ISP2400_SCTBL_VECTORS_PER_LINE_PER_COLOR \ diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c index f46238725eea..3d269bd23207 100644 --- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c +++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c @@ -1305,8 +1305,6 @@ void ia_css_debug_frame_print(const struct ia_css_frame *frame, ia_css_debug_dtrace(2, " padded width = %d\n", frame->info.padded_width); ia_css_debug_dtrace(2, " format = %d\n", frame->info.format); - ia_css_debug_dtrace(2, " is contiguous = %s\n", - frame->contiguous ? "yes" : "no"); switch (frame->info.format) { case IA_CSS_FRAME_FORMAT_NV12: case IA_CSS_FRAME_FORMAT_NV16: diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h index c756a134efc3..700070c58eda 100644 --- a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h +++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h @@ -109,16 +109,13 @@ void ia_css_frame_free_multiple(unsigned int num_frames, * * @param frame The allocated frame. * @param[in] size_bytes The frame size in bytes. - * @param[in] contiguous Allocate memory physically contiguously or not. * @return The error code. * * Allocate a frame using the given size in bytes. * The frame structure is partially null initialized. */ -int ia_css_frame_allocate_with_buffer_size( - struct ia_css_frame **frame, - const unsigned int size_bytes, - const bool contiguous); +int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame, + const unsigned int size_bytes); /* @brief Check whether 2 frames are same type * diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c index a3aae638b0bf..5a7058320ee6 100644 --- a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c +++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c @@ -48,12 +48,6 @@ static void frame_init_raw_single_plane( unsigned int subpixels_per_line, unsigned int bits_per_pixel); -static void frame_init_mipi_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel); - static void frame_init_nv_planes(struct ia_css_frame *frame, unsigned int horizontal_decimation, unsigned int vertical_decimation, @@ -77,15 +71,13 @@ static int frame_allocate_with_data(struct ia_css_frame **frame, unsigned int height, enum ia_css_frame_format format, unsigned int padded_width, - unsigned int raw_bit_depth, - bool contiguous); + unsigned int raw_bit_depth); static struct ia_css_frame *frame_create(unsigned int width, unsigned int height, enum ia_css_frame_format format, unsigned int padded_width, unsigned int raw_bit_depth, - bool contiguous, bool valid); static unsigned @@ -137,7 +129,7 @@ int ia_css_frame_allocate(struct ia_css_frame **frame, width, height, format, padded_width, raw_bit_depth); err = frame_allocate_with_data(frame, width, height, format, - padded_width, raw_bit_depth, false); + padded_width, raw_bit_depth); if ((*frame) && err == 0) ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, @@ -154,7 +146,6 @@ int 
ia_css_frame_allocate(struct ia_css_frame **frame, int ia_css_frame_map(struct ia_css_frame **frame, const struct ia_css_frame_info *info, const void __user *data, - u16 attribute, unsigned int pgnr) { int err = 0; @@ -180,9 +171,7 @@ int ia_css_frame_map(struct ia_css_frame **frame, goto error; } - me->data = hmm_alloc(me->data_bytes, HMM_BO_USER, 0, data, - attribute & ATOMISP_MAP_FLAG_CACHED); - + me->data = hmm_create_from_userdata(me->data_bytes, data); if (me->data == mmgr_NULL) err = -EINVAL; @@ -216,7 +205,6 @@ int ia_css_frame_create_from_info(struct ia_css_frame **frame, info->format, info->padded_width, info->raw_bit_depth, - false, false); if (!me) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, @@ -270,49 +258,6 @@ int ia_css_frame_set_data(struct ia_css_frame *frame, return err; } -int ia_css_frame_allocate_contiguous(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth) -{ - int err = 0; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n", - width, height, format, padded_width, raw_bit_depth); - - err = frame_allocate_with_data(frame, width, height, format, - padded_width, raw_bit_depth, true); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous() leave: frame=%p\n", - frame ? *frame : (void *)-1); - - return err; -} - -int ia_css_frame_allocate_contiguous_from_info( - struct ia_css_frame **frame, - const struct ia_css_frame_info *info) -{ - int err = 0; - - assert(frame); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous_from_info() enter:\n"); - err = ia_css_frame_allocate_contiguous(frame, - info->res.width, - info->res.height, - info->format, - info->padded_width, - info->raw_bit_depth); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous_from_info() leave:\n"); - return err; -} - void ia_css_frame_free(struct ia_css_frame *frame) { IA_CSS_ENTER_PRIVATE("frame = %p", frame); @@ -343,11 +288,9 @@ int ia_css_frame_init_planes(struct ia_css_frame *frame) switch (frame->info.format) { case IA_CSS_FRAME_FORMAT_MIPI: - frame_init_mipi_plane(frame, &frame->planes.raw, - frame->info.res.height, - frame->info.padded_width, - frame->info.raw_bit_depth <= 8 ? 1 : 2); - break; + dev_err(atomisp_dev, + "%s: unexpected use of IA_CSS_FRAME_FORMAT_MIPI\n", __func__); + return -EINVAL; case IA_CSS_FRAME_FORMAT_RAW_PACKED: frame_init_raw_single_plane(frame, &frame->planes.raw, frame->info.res.height, @@ -460,10 +403,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info, IA_CSS_LEAVE_PRIVATE(""); return; } - if (min_padded_width > width) - align = min_padded_width; - else - align = width; + align = max(min_padded_width, width); info->res.width = width; /* frames with a U and V plane of 8 bits per pixel need to have @@ -529,16 +469,14 @@ void ia_css_frame_free_multiple(unsigned int num_frames, } } -int ia_css_frame_allocate_with_buffer_size( - struct ia_css_frame **frame, - const unsigned int buffer_size_bytes, - const bool contiguous) +int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame, + const unsigned int buffer_size_bytes) { /* AM: Body coppied from frame_allocate_with_data(). 
*/ int err; struct ia_css_frame *me = frame_create(0, 0, IA_CSS_FRAME_FORMAT_NUM,/* Not valid format yet */ - 0, 0, contiguous, false); + 0, 0, false); if (!me) return -ENOMEM; @@ -670,22 +608,6 @@ static void frame_init_raw_single_plane( return; } -static void frame_init_mipi_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel) -{ - unsigned int stride; - - stride = subpixels_per_line * bytes_per_pixel; - frame->data_bytes = 8388608; /* 8*1024*1024 */ - frame->valid = false; - frame->contiguous = true; - frame_init_plane(plane, subpixels_per_line, stride, height, 0); - return; -} - static void frame_init_nv_planes(struct ia_css_frame *frame, unsigned int horizontal_decimation, unsigned int vertical_decimation, @@ -803,11 +725,7 @@ static int frame_allocate_buffer_data(struct ia_css_frame *frame) #ifdef ISP2401 IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes); #endif - frame->data = hmm_alloc(frame->data_bytes, - HMM_BO_PRIVATE, 0, NULL, - frame->contiguous ? - ATOMISP_MAP_FLAG_CONTIGUOUS : 0); - + frame->data = hmm_alloc(frame->data_bytes); if (frame->data == mmgr_NULL) return -ENOMEM; return 0; @@ -818,8 +736,7 @@ static int frame_allocate_with_data(struct ia_css_frame **frame, unsigned int height, enum ia_css_frame_format format, unsigned int padded_width, - unsigned int raw_bit_depth, - bool contiguous) + unsigned int raw_bit_depth) { int err; struct ia_css_frame *me = frame_create(width, @@ -827,7 +744,6 @@ static int frame_allocate_with_data(struct ia_css_frame **frame, format, padded_width, raw_bit_depth, - contiguous, true); if (!me) @@ -857,7 +773,6 @@ static struct ia_css_frame *frame_create(unsigned int width, enum ia_css_frame_format format, unsigned int padded_width, unsigned int raw_bit_depth, - bool contiguous, bool valid) { struct ia_css_frame *me = kvmalloc(sizeof(*me), GFP_KERNEL); @@ -871,7 +786,6 @@ static struct ia_css_frame *frame_create(unsigned int width, me->info.format = format; me->info.padded_width = padded_width; me->info.raw_bit_depth = raw_bit_depth; - me->contiguous = contiguous; me->valid = valid; me->data_bytes = 0; me->data = mmgr_NULL; diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c index 823ec54b6281..99c2f3a533ab 100644 --- a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c +++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c @@ -131,7 +131,7 @@ ia_css_isp_param_allocate_isp_parameters( goto cleanup; } if (pclass != IA_CSS_PARAM_CLASS_PARAM) { - css_params->params[pclass][mem].address = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, 0); + css_params->params[pclass][mem].address = hmm_alloc(size); if (!css_params->params[pclass][mem].address) { err = -ENOMEM; goto cleanup; diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c index 39604752785b..2e07dab8bf51 100644 --- a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c +++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c @@ -254,14 +254,15 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool, void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool, struct ia_css_rmgr_vbuf_handle **handle) { - struct ia_css_rmgr_vbuf_handle h = { 0 }; - if ((!pool) || (!handle) || (!*handle)) { IA_CSS_LOG("Invalid inputs"); return; } 
if (pool->copy_on_write) { + struct ia_css_rmgr_vbuf_handle *new_handle; + struct ia_css_rmgr_vbuf_handle h = { 0 }; + /* only one reference, reuse (no new retain) */ if ((*handle)->count == 1) return; @@ -272,23 +273,29 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool, h.size = (*handle)->size; /* release ref to current buffer */ ia_css_rmgr_refcount_release_vbuf(handle); - **handle = h; + new_handle = &h; + } else { + new_handle = *handle; } /* get new buffer for needed size */ - if ((*handle)->vptr == 0x0) { + if (new_handle->vptr == 0x0) { if (pool->recycle) { /* try and pop from pool */ - rmgr_pop_handle(pool, handle); + rmgr_pop_handle(pool, &new_handle); } - if ((*handle)->vptr == 0x0) { + if (new_handle->vptr == 0x0) { /* we need to allocate */ - (*handle)->vptr = hmm_alloc((*handle)->size, - HMM_BO_PRIVATE, 0, NULL, 0); + new_handle->vptr = hmm_alloc(new_handle->size); } else { /* we popped a buffer */ + *handle = new_handle; return; } } + /* Note that new_handle will change to an internally maintained one */ + ia_css_rmgr_refcount_retain_vbuf(&new_handle); + *handle = new_handle; + return; } /* Note that handle will change to an internally maintained one */ ia_css_rmgr_refcount_retain_vbuf(handle); diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c index 7f4592565af6..c34bfc5f970d 100644 --- a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c +++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c @@ -64,7 +64,7 @@ int ia_css_spctrl_load_fw(sp_ID_t sp_id, ia_css_spctrl_cfg *spctrl_cfg) * Data used to be stored separately, because of access alignment constraints, * fix the FW generation instead */ - code_addr = hmm_alloc(spctrl_cfg->code_size, HMM_BO_PRIVATE, 0, NULL, 0); + code_addr = hmm_alloc(spctrl_cfg->code_size); if (code_addr == mmgr_NULL) return -ENOMEM; hmm_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size); diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 1d605e533e29..da96aaffebc1 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -3061,7 +3061,6 @@ init_vf_frameinfo_defaults(struct ia_css_pipe *pipe, assert(vf_frame); sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->info, idx); - vf_frame->contiguous = false; vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id); @@ -3243,7 +3242,6 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe, in_frame->info.raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); ia_css_frame_info_set_width(&in_frame->info, pipe->stream->config.input_config.input_res.width, 0); - in_frame->contiguous = false; in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id); @@ -3271,7 +3269,6 @@ init_out_frameinfo_defaults(struct ia_css_pipe *pipe, assert(out_frame); sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, idx); - out_frame->contiguous = false; out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + 
idx, thread_id, &queue_id); @@ -3510,8 +3507,7 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe) if (pipe->config.acc_extension) pipe->pipeline.pipe_qos_config = 0; - fw = pipe->vf_stage; - for (i = 0; fw; fw = fw->next) { + for (fw = pipe->vf_stage; fw; fw = fw->next) { err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw); if (err) goto ERR; @@ -7158,7 +7154,6 @@ create_host_copy_pipeline(struct ia_css_pipe *pipe, ia_css_pipeline_clean(me); /* Construct out_frame info */ - out_frame->contiguous = false; out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; if (copy_on_sp(pipe) && @@ -7208,7 +7203,6 @@ create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, 0); if (err) return err; - out_frame->contiguous = false; out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id); diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c index dd688f8ab649..e7ef578db8ab 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c +++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c @@ -369,7 +369,7 @@ void sh_css_unload_firmware(void) ia_css_ptr sh_css_load_blob(const unsigned char *blob, unsigned int size) { - ia_css_ptr target_addr = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, 0); + ia_css_ptr target_addr = hmm_alloc(size); /* * this will allocate memory aligned to a DDR word boundary which * is required for the CSS DMA to read the instructions. diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c index 0acf75497ae7..bc6e8598a776 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c +++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c @@ -431,8 +431,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe, /* allocate new frame */ err = ia_css_frame_allocate_with_buffer_size( &my_css.mipi_frames[port][i], - my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES, - false); + my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES); if (err) { for (j = 0; j < i; j++) { if (my_css.mipi_frames[port][j]) { diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 09f87c285b8d..0e7c38b2bfe3 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -2072,8 +2072,7 @@ static bool realloc_isp_css_mm_buf( size_t *curr_size, size_t needed_size, bool force, - int *err, - uint16_t mmgr_attribute) + int *err) { s32 id; @@ -2095,11 +2094,7 @@ static bool realloc_isp_css_mm_buf( id = IA_CSS_REFCOUNT_PARAM_BUFFER; ia_css_refcount_decrement(id, *curr_buf); - *curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size, - HMM_BO_PRIVATE, 0, - NULL, - mmgr_attribute)); - + *curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size)); if (!*curr_buf) { *err = -ENOMEM; *curr_size = 0; @@ -2122,7 +2117,7 @@ static bool reallocate_buffer( IA_CSS_ENTER_PRIVATE("void"); ret = realloc_isp_css_mm_buf(curr_buf, - curr_size, needed_size, force, err, 0); + curr_size, needed_size, force, err); IA_CSS_LEAVE_PRIVATE("ret=%d", ret); return ret; @@ -2161,7 +2156,7 @@ ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid) me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES); me->size = me->dmem_size + 
me->vmem_size * 2 + me->hmem_size; - me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0); + me->data_ptr = hmm_alloc(me->size); if (me->data_ptr == mmgr_NULL) { kvfree(me); me = NULL; @@ -2211,7 +2206,7 @@ ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info) md->info = *metadata_info; md->exp_id = 0; - md->address = hmm_alloc(metadata_info->size, HMM_BO_PRIVATE, 0, NULL, 0); + md->address = hmm_alloc(metadata_info->size); if (md->address == mmgr_NULL) goto error; @@ -2364,13 +2359,13 @@ sh_css_create_isp_params(struct ia_css_stream *stream, ddr_ptrs_size->isp_param = params_size; ddr_ptrs->isp_param = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, - hmm_alloc(params_size, HMM_BO_PRIVATE, 0, NULL, 0)); + hmm_alloc(params_size)); succ &= (ddr_ptrs->isp_param != mmgr_NULL); ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table); ddr_ptrs->macc_tbl = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, - hmm_alloc(sizeof(struct ia_css_macc_table), HMM_BO_PRIVATE, 0, NULL, 0)); + hmm_alloc(sizeof(struct ia_css_macc_table))); succ &= (ddr_ptrs->macc_tbl != mmgr_NULL); *isp_params_out = params; @@ -2584,14 +2579,10 @@ sh_css_params_init(void) for (i = 0; i < SH_CSS_MAX_STAGES; i++) { xmem_sp_stage_ptrs[p][i] = ia_css_refcount_increment(-1, - hmm_alloc(sizeof(struct sh_css_sp_stage), - HMM_BO_PRIVATE, 0, NULL, - ATOMISP_MAP_FLAG_CLEARED)); + hmm_alloc(sizeof(struct sh_css_sp_stage))); xmem_isp_stage_ptrs[p][i] = ia_css_refcount_increment(-1, - hmm_alloc(sizeof(struct sh_css_sp_stage), - HMM_BO_PRIVATE, 0, NULL, - ATOMISP_MAP_FLAG_CLEARED)); + hmm_alloc(sizeof(struct sh_css_sp_stage))); if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) || (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) { @@ -2599,6 +2590,9 @@ sh_css_params_init(void) IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM); return -ENOMEM; } + + hmm_set(xmem_sp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage)); + hmm_set(xmem_isp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage)); } } @@ -2609,13 +2603,9 @@ sh_css_params_init(void) sp_ddr_ptrs = ia_css_refcount_increment(-1, hmm_alloc(CEIL_MUL(sizeof(struct sh_css_ddr_address_map), - HIVE_ISP_DDR_WORD_BYTES), - HMM_BO_PRIVATE, 0, NULL, - ATOMISP_MAP_FLAG_CLEARED)); + HIVE_ISP_DDR_WORD_BYTES))); xmem_sp_group_ptrs = ia_css_refcount_increment(-1, - hmm_alloc(sizeof(struct sh_css_sp_group), - HMM_BO_PRIVATE, 0, NULL, - ATOMISP_MAP_FLAG_CLEARED)); + hmm_alloc(sizeof(struct sh_css_sp_group))); if ((sp_ddr_ptrs == mmgr_NULL) || (xmem_sp_group_ptrs == mmgr_NULL)) { @@ -2623,6 +2613,9 @@ sh_css_params_init(void) IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM); return -ENOMEM; } + hmm_set(sp_ddr_ptrs, 0, CEIL_MUL(sizeof(struct sh_css_ddr_address_map), + HIVE_ISP_DDR_WORD_BYTES)); + hmm_set(xmem_sp_group_ptrs, 0, sizeof(struct sh_css_sp_group)); IA_CSS_LEAVE_ERR_PRIVATE(0); return 0; } @@ -2667,7 +2660,7 @@ int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe, } if (!stream_started) { - pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0); + pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table)); if (pipe->scaler_pp_lut == mmgr_NULL) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, @@ -2709,7 +2702,7 @@ int sh_css_params_map_and_store_default_gdc_lut(void) host_lut_store((void *)zoom_table); - default_gdc_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0); + default_gdc_lut = hmm_alloc(sizeof(zoom_table)); if (default_gdc_lut == mmgr_NULL) return -ENOMEM; @@ -3802,7 +3795,7 @@ static int write_ia_css_isp_parameter_set_info_to_ddr( assert(out); *out = 
ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL, - hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info), HMM_BO_PRIVATE, 0, NULL, 0)); + hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info))); succ = (*out != mmgr_NULL); if (succ) hmm_store(*out, diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/av7110/av7110.c index d74ee0ecfb36..df81a9b744c2 100644 --- a/drivers/staging/media/av7110/av7110.c +++ b/drivers/staging/media/av7110/av7110.c @@ -2364,7 +2364,7 @@ static int av7110_attach(struct saa7146_dev* dev, budgetpatch = 0; /* autodetect the presence of budget patch * this only works if saa7146 has been recently - * reset with with MASK_31 to MC1 + * reset with MASK_31 to MC1 * * will wait for VBI_B event (vertical blank at port B) * and will reset GPIO3 after VBI_B is detected. diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h index 26308bb29adc..2989ebc631cc 100644 --- a/drivers/staging/media/hantro/hantro.h +++ b/drivers/staging/media/hantro/hantro.h @@ -227,6 +227,7 @@ struct hantro_dev { * * @ctrl_handler: Control handler used to register controls. * @jpeg_quality: User-specified JPEG compression quality. + * @bit_depth: Bit depth of current frame * * @codec_ops: Set of operations related to codec mode. * @postproc: Post-processing context. @@ -252,6 +253,7 @@ struct hantro_ctx { struct v4l2_ctrl_handler ctrl_handler; int jpeg_quality; + int bit_depth; const struct hantro_codec_ops *codec_ops; struct hantro_postproc_ctx postproc; @@ -277,6 +279,7 @@ struct hantro_ctx { * @enc_fmt: Format identifier for encoder registers. * @frmsize: Supported range of frame sizes (only for bitstream formats). * @postprocessed: Indicates if this format needs the post-processor. + * @match_depth: Indicates if format bit depth must match video bit depth */ struct hantro_fmt { char *name; @@ -287,6 +290,7 @@ struct hantro_fmt { enum hantro_enc_fmt enc_fmt; struct v4l2_frmsize_stepwise frmsize; bool postprocessed; + bool match_depth; }; struct hantro_reg { diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index ac232b5f7825..2036f72eeb4a 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -47,12 +47,10 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts) { struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx); struct vb2_buffer *buf; - int index; - index = vb2_find_timestamp(q, ts, 0); - if (index < 0) + buf = vb2_find_buffer(q, ts); + if (!buf) return 0; - buf = vb2_get_buffer(q, index); return hantro_get_dec_buf_addr(ctx, buf); } @@ -265,7 +263,7 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl) if (sps->bit_depth_luma_minus8 != 0) /* Only 8-bit is supported */ return -EINVAL; - } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) { + } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) { const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps; if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8) @@ -304,18 +302,16 @@ static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl) +static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl) { struct hantro_ctx *ctx; ctx = container_of(ctrl->handler, struct hantro_ctx, ctrl_handler); - vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val); - switch (ctrl->id) { - case V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP: - ctx->hevc_dec.ctrls.hevc_hdr_skip_length = ctrl->val; + case 
V4L2_CID_STATELESS_VP9_FRAME: + ctx->bit_depth = ctrl->p_new.p_vp9_frame->bit_depth; break; default: return -EINVAL; @@ -332,8 +328,8 @@ static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = { .s_ctrl = hantro_jpeg_s_ctrl, }; -static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = { - .s_ctrl = hantro_hevc_s_ctrl, +static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = { + .s_ctrl = hantro_vp9_s_ctrl, }; #define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \ @@ -438,18 +434,18 @@ static const struct hantro_ctrl controls[] = { }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE, - .min = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED, - .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED, - .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED, + .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE, + .min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, + .max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, + .def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, }, }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE, - .min = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B, - .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B, - .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B, + .id = V4L2_CID_STATELESS_HEVC_START_CODE, + .min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, + .max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, + .def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, }, }, { .codec = HANTRO_HEVC_DECODER, @@ -469,40 +465,29 @@ static const struct hantro_ctrl controls[] = { }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS, + .id = V4L2_CID_STATELESS_HEVC_SPS, .ops = &hantro_ctrl_ops, }, }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS, + .id = V4L2_CID_STATELESS_HEVC_PPS, }, }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS, + .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS, }, }, { .codec = HANTRO_HEVC_DECODER, .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX, - }, - }, { - .codec = HANTRO_HEVC_DECODER, - .cfg = { - .id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP, - .name = "Hantro HEVC slice header skip bytes", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .def = 0, - .max = 0x100, - .step = 1, - .ops = &hantro_hevc_ctrl_ops, + .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX, }, }, { .codec = HANTRO_VP9_DECODER, .cfg = { .id = V4L2_CID_STATELESS_VP9_FRAME, + .ops = &hantro_vp9_ctrl_ops, }, }, { .codec = HANTRO_VP9_DECODER, @@ -638,6 +623,7 @@ static const struct of_device_id of_hantro_match[] = { { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, }, { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, }, { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, }, + { .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, }, { .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, }, #endif #ifdef CONFIG_VIDEO_HANTRO_IMX8M diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c index 5df6f08e26f5..233ecd863d5f 100644 --- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c +++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c @@ -117,6 +117,41 @@ static void prepare_tile_info_buffer(struct hantro_ctx *ctx) vpu_debug(1, "%s: no chroma!\n", __func__); } +static int compute_header_skip_length(struct hantro_ctx *ctx) +{ + const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls; + const struct 
v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params; + const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps; + const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps; + int skip = 0; + + if (pps->flags & V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT) + /* size of pic_output_flag */ + skip++; + + if (sps->flags & V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE) + /* size of pic_order_cnt_lsb */ + skip += 2; + + if (!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC)) { + /* size of pic_order_cnt_lsb */ + skip += sps->log2_max_pic_order_cnt_lsb_minus4 + 4; + + /* size of short_term_ref_pic_set_sps_flag */ + skip++; + + if (decode_params->short_term_ref_pic_set_size) + /* size of st_ref_pic_set( num_short_term_ref_pic_sets ) */ + skip += decode_params->short_term_ref_pic_set_size; + else if (sps->num_short_term_ref_pic_sets > 1) + skip += fls(sps->num_short_term_ref_pic_sets - 1); + + skip += decode_params->long_term_ref_pic_set_size; + } + + return skip; +} + static void set_params(struct hantro_ctx *ctx) { const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls; @@ -134,7 +169,7 @@ static void set_params(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_output_8_bits, 0); - hantro_reg_write(vpu, &g2_hdr_skip_length, ctrls->hevc_hdr_skip_length); + hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx)); min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3; max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size; @@ -390,11 +425,10 @@ static int set_ref(struct hantro_ctx *ctx) !!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED)); /* - * Write POC count diff from current pic. For frame decoding only compute - * pic_order_cnt[0] and ignore pic_order_cnt[1] used in field-coding. + * Write POC count diff from current pic. 
*/ for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) { - char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt[0]; + char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt_val; hantro_reg_write(vpu, &cur_poc[i], poc_diff); } @@ -421,7 +455,7 @@ static int set_ref(struct hantro_ctx *ctx) dpb_longterm_e = 0; for (i = 0; i < decode_params->num_active_dpb_entries && i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) { - luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt[0]); + luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt_val); if (!luma_addr) return -ENOMEM; diff --git a/drivers/staging/media/hantro/hantro_g2_regs.h b/drivers/staging/media/hantro/hantro_g2_regs.h index 877d663a8181..82606783591a 100644 --- a/drivers/staging/media/hantro/hantro_g2_regs.h +++ b/drivers/staging/media/hantro/hantro_g2_regs.h @@ -107,7 +107,7 @@ #define g2_start_code_e G2_DEC_REG(10, 31, 0x1) #define g2_init_qp_old G2_DEC_REG(10, 25, 0x3f) -#define g2_init_qp G2_DEC_REG(10, 24, 0x3f) +#define g2_init_qp G2_DEC_REG(10, 24, 0x7f) #define g2_num_tile_cols_old G2_DEC_REG(10, 20, 0x1f) #define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f) #define g2_num_tile_rows_old G2_DEC_REG(10, 15, 0x1f) diff --git a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c index 91c21b634fab..6fc4b555517f 100644 --- a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c +++ b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c @@ -111,17 +111,17 @@ get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp) { struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; - int buf_idx; + struct vb2_buffer *buf; /* * If a ref is unused or invalid, address of current destination * buffer is returned. 
*/ - buf_idx = vb2_find_timestamp(cap_q, timestamp, 0); - if (buf_idx < 0) - return vb2_to_hantro_decoded_buf(&dst->vb2_buf); + buf = vb2_find_buffer(cap_q, timestamp); + if (!buf) + buf = &dst->vb2_buf; - return vb2_to_hantro_decoded_buf(vb2_get_buffer(cap_q, buf_idx)); + return vb2_to_hantro_decoded_buf(buf); } static void update_dec_buf_info(struct hantro_decoded_buffer *buf, @@ -515,16 +515,8 @@ static void config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params) { if (ctx->dev->variant->legacy_regs) { - u8 pp_shift = 0; - hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth); hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth); - hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, dec_params->bit_depth); - - if (dec_params->bit_depth > 8) - pp_shift = 16 - dec_params->bit_depth; - - hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift); hantro_reg_write(ctx->dev, &g2_pix_shift, 0); } else { hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8); diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c index f86c98e19177..b990bc98164c 100644 --- a/drivers/staging/media/hantro/hantro_hevc.c +++ b/drivers/staging/media/hantro/hantro_hevc.c @@ -33,7 +33,7 @@ void hantro_hevc_ref_init(struct hantro_ctx *ctx) } dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, - int poc) + s32 poc) { struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec; int i; @@ -154,6 +154,25 @@ err_free_tile_buffers: return -ENOMEM; } +static int hantro_hevc_validate_sps(struct hantro_ctx *ctx, const struct v4l2_ctrl_hevc_sps *sps) +{ + /* + * for tile pixel format check if the width and height match + * hardware constraints + */ + if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_NV12_4L4) { + if (ctx->dst_fmt.width != + ALIGN(sps->pic_width_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_width)) + return -EINVAL; + + if (ctx->dst_fmt.height != + ALIGN(sps->pic_height_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_height)) + return -EINVAL; + } + + return 0; +} + int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx) { struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec; @@ -163,22 +182,26 @@ int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx) hantro_start_prepare_run(ctx); ctrls->decode_params = - hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS); + hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS); if (WARN_ON(!ctrls->decode_params)) return -EINVAL; ctrls->scaling = - hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX); + hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX); if (WARN_ON(!ctrls->scaling)) return -EINVAL; ctrls->sps = - hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SPS); + hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SPS); if (WARN_ON(!ctrls->sps)) return -EINVAL; + ret = hantro_hevc_validate_sps(ctx, ctrls->sps); + if (ret) + return ret; + ctrls->pps = - hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_PPS); + hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_PPS); if (WARN_ON(!ctrls->pps)) return -EINVAL; diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h index 52a960f6fa4a..e83f0c523a30 100644 --- a/drivers/staging/media/hantro/hantro_hw.h +++ b/drivers/staging/media/hantro/hantro_hw.h @@ -18,9 +18,21 @@ #define DEC_8190_ALIGN_MASK 0x07U #define MB_DIM 16 +#define TILE_MB_DIM 4 #define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM) #define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM) +#define 
FMT_MIN_WIDTH 48 +#define FMT_MIN_HEIGHT 48 +#define FMT_HD_WIDTH 1280 +#define FMT_HD_HEIGHT 720 +#define FMT_FHD_WIDTH 1920 +#define FMT_FHD_HEIGHT 1088 +#define FMT_UHD_WIDTH 3840 +#define FMT_UHD_HEIGHT 2160 +#define FMT_4K_WIDTH 4096 +#define FMT_4K_HEIGHT 2304 + #define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1) struct hantro_dev; @@ -133,7 +145,7 @@ struct hantro_hevc_dec_hw_ctx { struct hantro_aux_buf tile_bsd; struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES]; struct hantro_aux_buf scaling_lists; - int ref_bufs_poc[NUM_REF_PICTURES]; + s32 ref_bufs_poc[NUM_REF_PICTURES]; u32 ref_bufs_used; struct hantro_hevc_dec_ctrls ctrls; unsigned int num_tile_cols_allocated; @@ -306,6 +318,7 @@ extern const struct hantro_variant rk3066_vpu_variant; extern const struct hantro_variant rk3288_vpu_variant; extern const struct hantro_variant rk3328_vpu_variant; extern const struct hantro_variant rk3399_vpu_variant; +extern const struct hantro_variant rk3568_vepu_variant; extern const struct hantro_variant rk3568_vpu_variant; extern const struct hantro_variant sama5d4_vdec_variant; extern const struct hantro_variant sunxi_vpu_variant; @@ -345,9 +358,10 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx); int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx); int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx); void hantro_hevc_ref_init(struct hantro_ctx *ctx); -dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, int poc); +dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, s32 poc); int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr); + static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension) { return (dimension + 63) / 64; diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c index ab168c1c0d28..a0928c508434 100644 --- a/drivers/staging/media/hantro/hantro_postproc.c +++ b/drivers/staging/media/hantro/hantro_postproc.c @@ -12,6 +12,7 @@ #include "hantro_hw.h" #include "hantro_g1_regs.h" #include "hantro_g2_regs.h" +#include "hantro_v4l2.h" #define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \ { \ @@ -112,12 +113,14 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) { struct hantro_dev *vpu = ctx->dev; struct vb2_v4l2_buffer *dst_buf; - size_t chroma_offset = ctx->dst_fmt.width * ctx->dst_fmt.height; int down_scale = down_scale_factor(ctx); + size_t chroma_offset; dma_addr_t dst_dma; dst_buf = hantro_get_dst_buf(ctx); dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline * + ctx->dst_fmt.height; if (down_scale) { hantro_reg_write(vpu, &g2_down_scale_e, 1); @@ -129,6 +132,16 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma); hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset); } + if (ctx->dev->variant->legacy_regs) { + int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat); + u8 pp_shift = 0; + + if (out_depth > 8) + pp_shift = 16 - out_depth; + + hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth); + hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift); + } hantro_reg_write(vpu, &g2_out_rs_e, 1); } @@ -174,18 +187,27 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx) struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q; unsigned int num_buffers = cap_queue->num_buffers; + struct v4l2_pix_format_mplane pix_mp; + const struct hantro_fmt *fmt; 
unsigned int i, buf_size; - buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage; + /* this should always pick native format */ + fmt = hantro_get_default_fmt(ctx, false); + if (!fmt) + return -EINVAL; + v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width, + ctx->src_fmt.height); + + buf_size = pix_mp.plane_fmt[0].sizeimage; if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE) - buf_size += hantro_h264_mv_size(ctx->dst_fmt.width, - ctx->dst_fmt.height); + buf_size += hantro_h264_mv_size(pix_mp.width, + pix_mp.height); else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME) - buf_size += hantro_vp9_mv_size(ctx->dst_fmt.width, - ctx->dst_fmt.height); + buf_size += hantro_vp9_mv_size(pix_mp.width, + pix_mp.height); else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) - buf_size += hantro_hevc_mv_size(ctx->dst_fmt.width, - ctx->dst_fmt.height); + buf_size += hantro_hevc_mv_size(pix_mp.width, + pix_mp.height); for (i = 0; i < num_buffers; ++i) { struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i]; diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index 22ad182ee972..2c7a805289e7 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -64,6 +64,42 @@ hantro_get_postproc_formats(const struct hantro_ctx *ctx, return ctx->dev->variant->postproc_fmts; } +int hantro_get_format_depth(u32 fourcc) +{ + switch (fourcc) { + case V4L2_PIX_FMT_P010: + case V4L2_PIX_FMT_P010_4L4: + return 10; + default: + return 8; + } +} + +static bool +hantro_check_depth_match(const struct hantro_ctx *ctx, + const struct hantro_fmt *fmt) +{ + int fmt_depth, ctx_depth = 8; + + if (!fmt->match_depth && !fmt->postprocessed) + return true; + + /* 0 means default depth, which is 8 */ + if (ctx->bit_depth) + ctx_depth = ctx->bit_depth; + + fmt_depth = hantro_get_format_depth(fmt->fourcc); + + /* + * Allow only downconversion for postproc formats for now. + * It may be possible to relax that on some HW. 
+ */ + if (!fmt->match_depth) + return fmt_depth <= ctx_depth; + + return fmt_depth == ctx_depth; +} + static const struct hantro_fmt * hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc) { @@ -82,7 +118,7 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc) return NULL; } -static const struct hantro_fmt * +const struct hantro_fmt * hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream) { const struct hantro_fmt *formats; @@ -91,7 +127,8 @@ hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream) formats = hantro_get_formats(ctx, &num_fmts); for (i = 0; i < num_fmts; i++) { if (bitstream == (formats[i].codec_mode != - HANTRO_MODE_NONE)) + HANTRO_MODE_NONE) && + hantro_check_depth_match(ctx, &formats[i])) return &formats[i]; } return NULL; @@ -162,11 +199,13 @@ static int vidioc_enum_fmt(struct file *file, void *priv, formats = hantro_get_formats(ctx, &num_fmts); for (i = 0; i < num_fmts; i++) { bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE; + fmt = &formats[i]; if (skip_mode_none == mode_none) continue; + if (!hantro_check_depth_match(ctx, fmt)) + continue; if (j == f->index) { - fmt = &formats[i]; f->pixelformat = fmt->fourcc; return 0; } @@ -182,8 +221,11 @@ static int vidioc_enum_fmt(struct file *file, void *priv, return -EINVAL; formats = hantro_get_postproc_formats(ctx, &num_fmts); for (i = 0; i < num_fmts; i++) { + fmt = &formats[i]; + + if (!hantro_check_depth_match(ctx, fmt)) + continue; if (j == f->index) { - fmt = &formats[i]; f->pixelformat = fmt->fourcc; return 0; } @@ -259,7 +301,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx, } else if (ctx->is_encoder) { vpu_fmt = ctx->vpu_dst_fmt; } else { - vpu_fmt = ctx->vpu_src_fmt; + vpu_fmt = fmt; /* * Width/height on the CAPTURE end of a decoder are ignored and * replaced by the OUTPUT ones. 
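The hantro_v4l2.c hunk above filters CAPTURE formats by bit depth: ctx->bit_depth is recorded from V4L2_CID_STATELESS_VP9_FRAME, formats flagged match_depth must match it exactly, and post-processed formats may only down-convert. Below is a minimal stand-alone sketch of that rule (not part of the patch); sample_fmt and depth_match() are hypothetical stand-ins for the driver's struct hantro_fmt and hantro_check_depth_match().

/*
 * Sketch of the depth-match rule added in hantro_v4l2.c; names here are
 * illustrative only, not driver symbols.
 */
#include <stdbool.h>

struct sample_fmt {
	bool match_depth;	/* native format: depth must match the stream */
	bool postprocessed;	/* produced by the post-processor */
	int depth;		/* 8 or 10, as hantro_get_format_depth() reports */
};

static bool depth_match(const struct sample_fmt *fmt, int stream_depth)
{
	if (stream_depth == 0)			/* 0 means default depth, i.e. 8-bit */
		stream_depth = 8;

	if (!fmt->match_depth && !fmt->postprocessed)
		return true;			/* format is depth-agnostic */

	if (!fmt->match_depth)			/* post-processed: down-conversion only */
		return fmt->depth <= stream_depth;

	return fmt->depth == stream_depth;	/* native: exact match required */
}

Under this rule a 10-bit stream can still expose an 8-bit post-processed output (down-conversion), while any format marked match_depth is enumerated only when its depth equals the stream depth.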
diff --git a/drivers/staging/media/hantro/hantro_v4l2.h b/drivers/staging/media/hantro/hantro_v4l2.h index 18bc682c8556..64f6f57e9d7a 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.h +++ b/drivers/staging/media/hantro/hantro_v4l2.h @@ -22,5 +22,8 @@ extern const struct v4l2_ioctl_ops hantro_ioctl_ops; extern const struct vb2_ops hantro_queue_ops; void hantro_reset_fmts(struct hantro_ctx *ctx); +int hantro_get_format_depth(u32 fourcc); +const struct hantro_fmt * +hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream); #endif /* HANTRO_V4L2_H_ */ diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c index 9802508bade2..77f574fdfa77 100644 --- a/drivers/staging/media/hantro/imx8m_vpu_hw.c +++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c @@ -83,6 +83,14 @@ static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = { .fourcc = V4L2_PIX_FMT_YUYV, .codec_mode = HANTRO_MODE_NONE, .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, }, }; @@ -90,17 +98,25 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, }, { .fourcc = V4L2_PIX_FMT_MPEG2_SLICE, .codec_mode = HANTRO_MODE_MPEG2_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -109,11 +125,11 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_VP8_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 2160, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -122,11 +138,11 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_H264_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 2160, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -137,6 +153,14 @@ static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, }, }; @@ -144,18 +168,26 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12_4L4, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = TILE_MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = TILE_MB_DIM, + }, }, { .fourcc = V4L2_PIX_FMT_HEVC_SLICE, .codec_mode = HANTRO_MODE_HEVC_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, - .step_width = 
MB_DIM, - .min_height = 48, - .max_height = 2160, - .step_height = MB_DIM, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = TILE_MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = TILE_MB_DIM, }, }, { @@ -163,12 +195,12 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = { .codec_mode = HANTRO_MODE_VP9_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, - .step_width = MB_DIM, - .min_height = 48, - .max_height = 2160, - .step_height = MB_DIM, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = TILE_MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = TILE_MB_DIM, }, }, }; diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/staging/media/hantro/rockchip_vpu_hw.c index fc96501f3bc8..8de6fd2e8eef 100644 --- a/drivers/staging/media/hantro/rockchip_vpu_hw.c +++ b/drivers/staging/media/hantro/rockchip_vpu_hw.c @@ -63,6 +63,14 @@ static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = { .fourcc = V4L2_PIX_FMT_YUYV, .codec_mode = HANTRO_MODE_NONE, .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, + .step_height = MB_DIM, + }, }, }; @@ -70,17 +78,25 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, + .step_height = MB_DIM, + }, }, { .fourcc = V4L2_PIX_FMT_H264_SLICE, .codec_mode = HANTRO_MODE_H264_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -89,11 +105,11 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_MPEG2_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -102,11 +118,11 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_VP8_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -116,17 +132,25 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_4K_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_4K_HEIGHT, + .step_height = MB_DIM, + }, }, { .fourcc = V4L2_PIX_FMT_H264_SLICE, .codec_mode = HANTRO_MODE_H264_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 4096, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_4K_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 2304, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_4K_HEIGHT, .step_height = MB_DIM, }, }, @@ 
-135,11 +159,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_MPEG2_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -148,31 +172,80 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_VP8_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 2160, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, .step_height = MB_DIM, }, }, }; -static const struct hantro_fmt rk3399_vpu_dec_fmts[] = { +static const struct hantro_fmt rockchip_vdpu2_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, + .step_height = MB_DIM, + }, }, { .fourcc = V4L2_PIX_FMT_H264_SLICE, .codec_mode = HANTRO_MODE_H264_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, + .step_height = MB_DIM, + }, + }, + { + .fourcc = V4L2_PIX_FMT_MPEG2_SLICE, + .codec_mode = HANTRO_MODE_MPEG2_DEC, + .max_depth = 2, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, + .step_height = MB_DIM, + }, + }, + { + .fourcc = V4L2_PIX_FMT_VP8_FRAME, + .codec_mode = HANTRO_MODE_VP8_DEC, + .max_depth = 2, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, + }, +}; + +static const struct hantro_fmt rk3399_vpu_dec_fmts[] = { + { + .fourcc = V4L2_PIX_FMT_NV12, + .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -181,11 +254,11 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_MPEG2_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1920, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_FHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 1088, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_FHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -194,11 +267,11 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = { .codec_mode = HANTRO_MODE_VP8_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 2160, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, .step_height = MB_DIM, }, }, @@ -417,6 +490,14 @@ static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = { }, }; +static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = { + [HANTRO_MODE_JPEG_ENC] = { + .run = rockchip_vpu2_jpeg_enc_run, + .reset = rockchip_vpu2_enc_reset, + .done = 
rockchip_vpu2_jpeg_enc_done, + }, +}; + /* * VPU variant. */ @@ -439,6 +520,10 @@ static const struct hantro_irq rockchip_vpu2_irqs[] = { { "vdpu", rockchip_vpu2_vdpu_irq }, }; +static const struct hantro_irq rk3568_vepu_irqs[] = { + { "vepu", rockchip_vpu2_vepu_irq }, +}; + static const char * const rk3066_vpu_clk_names[] = { "aclk_vdpu", "hclk_vdpu", "aclk_vepu", "hclk_vepu" @@ -516,8 +601,8 @@ const struct hantro_variant rk3288_vpu_variant = { const struct hantro_variant rk3328_vpu_variant = { .dec_offset = 0x400, - .dec_fmts = rk3399_vpu_dec_fmts, - .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts), + .dec_fmts = rockchip_vdpu2_dec_fmts, + .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts), .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER | HANTRO_H264_DECODER, .codec_ops = rk3399_vpu_codec_ops, @@ -528,6 +613,11 @@ const struct hantro_variant rk3328_vpu_variant = { .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names), }; +/* + * H.264 decoding explicitly disabled in RK3399. + * This ensures userspace applications use the Rockchip VDEC core, + * which has better performance. + */ const struct hantro_variant rk3399_vpu_variant = { .enc_offset = 0x0, .enc_fmts = rockchip_vpu_enc_fmts, @@ -545,10 +635,23 @@ const struct hantro_variant rk3399_vpu_variant = { .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names) }; +const struct hantro_variant rk3568_vepu_variant = { + .enc_offset = 0x0, + .enc_fmts = rockchip_vpu_enc_fmts, + .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts), + .codec = HANTRO_JPEG_ENCODER, + .codec_ops = rk3568_vepu_codec_ops, + .irqs = rk3568_vepu_irqs, + .num_irqs = ARRAY_SIZE(rk3568_vepu_irqs), + .init = rockchip_vpu_hw_init, + .clk_names = rockchip_vpu_clk_names, + .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names) +}; + const struct hantro_variant rk3568_vpu_variant = { .dec_offset = 0x400, - .dec_fmts = rk3399_vpu_dec_fmts, - .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts), + .dec_fmts = rockchip_vdpu2_dec_fmts, + .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts), .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER | HANTRO_H264_DECODER, .codec_ops = rk3399_vpu_codec_ops, @@ -564,8 +667,8 @@ const struct hantro_variant px30_vpu_variant = { .enc_fmts = rockchip_vpu_enc_fmts, .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts), .dec_offset = 0x400, - .dec_fmts = rk3399_vpu_dec_fmts, - .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts), + .dec_fmts = rockchip_vdpu2_dec_fmts, + .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts), .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER | HANTRO_H264_DECODER, .codec_ops = rk3399_vpu_codec_ops, diff --git a/drivers/staging/media/hantro/sama5d4_vdec_hw.c b/drivers/staging/media/hantro/sama5d4_vdec_hw.c index b2fc1c5613e1..b205e2db5b04 100644 --- a/drivers/staging/media/hantro/sama5d4_vdec_hw.c +++ b/drivers/staging/media/hantro/sama5d4_vdec_hw.c @@ -16,6 +16,14 @@ static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = { .fourcc = V4L2_PIX_FMT_YUYV, .codec_mode = HANTRO_MODE_NONE, .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_HD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_HD_HEIGHT, + .step_height = MB_DIM, + }, }, }; @@ -23,17 +31,25 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_HD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_HD_HEIGHT, + .step_height = MB_DIM, + 
}, }, { .fourcc = V4L2_PIX_FMT_MPEG2_SLICE, .codec_mode = HANTRO_MODE_MPEG2_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1280, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_HD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 720, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_HD_HEIGHT, .step_height = MB_DIM, }, }, @@ -42,11 +58,11 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = { .codec_mode = HANTRO_MODE_VP8_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1280, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_HD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 720, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_HD_HEIGHT, .step_height = MB_DIM, }, }, @@ -55,11 +71,11 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = { .codec_mode = HANTRO_MODE_H264_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 1280, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_HD_WIDTH, .step_width = MB_DIM, - .min_height = 48, - .max_height = 720, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_HD_HEIGHT, .step_height = MB_DIM, }, }, diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/staging/media/hantro/sunxi_vpu_hw.c index c0edd5856a0c..02ce8b064a8f 100644 --- a/drivers/staging/media/hantro/sunxi_vpu_hw.c +++ b/drivers/staging/media/hantro/sunxi_vpu_hw.c @@ -14,6 +14,27 @@ static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = 32, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = 32, + }, + }, + { + .fourcc = V4L2_PIX_FMT_P010, + .codec_mode = HANTRO_MODE_NONE, + .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = 32, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = 32, + }, }, }; @@ -21,17 +42,39 @@ static const struct hantro_fmt sunxi_vpu_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12_4L4, .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = 32, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = 32, + }, + }, + { + .fourcc = V4L2_PIX_FMT_P010_4L4, + .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = 32, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = 32, + }, }, { .fourcc = V4L2_PIX_FMT_VP9_FRAME, .codec_mode = HANTRO_MODE_VP9_DEC, .max_depth = 2, .frmsize = { - .min_width = 48, - .max_width = 3840, + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, .step_width = 32, - .min_height = 48, - .max_height = 2160, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, .step_height = 32, }, }, diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c index 80b69a9a752c..e6d6ed3b1161 100644 --- a/drivers/staging/media/imx/imx-media-dev-common.c +++ b/drivers/staging/media/imx/imx-media-dev-common.c @@ -235,7 +235,7 @@ static int imx_media_inherit_controls(struct imx_media_dev *imxmd, if (!(spad->flags & MEDIA_PAD_FL_SINK)) continue; - pad = media_entity_remote_pad(spad); + pad = media_pad_remote_pad_first(spad); if (!pad || 
!is_media_entity_v4l2_subdev(pad->entity)) continue; diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c index 94bc866ca28c..294c808b2ebe 100644 --- a/drivers/staging/media/imx/imx-media-utils.c +++ b/drivers/staging/media/imx/imx-media-utils.c @@ -698,7 +698,7 @@ imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id, (!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE))) continue; - pad = media_entity_remote_pad(spad); + pad = media_pad_remote_pad_first(spad); if (!pad) continue; diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c index 8467a1491048..a0553c24cce4 100644 --- a/drivers/staging/media/imx/imx7-media-csi.c +++ b/drivers/staging/media/imx/imx7-media-csi.c @@ -17,18 +17,17 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/types.h> #include <media/v4l2-device.h> -#include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> +#include <media/v4l2-ioctl.h> #include <media/v4l2-mc.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-dma-contig.h> -#include <media/imx.h> -#include "imx-media.h" - #define IMX7_CSI_PAD_SINK 0 #define IMX7_CSI_PAD_SRC 1 #define IMX7_CSI_PADS_NUM 2 @@ -159,45 +158,102 @@ #define CSI_CSICR18 0x48 #define CSI_CSICR19 0x4c +#define IMX7_CSI_VIDEO_NAME "imx-capture" +/* In bytes, per queue */ +#define IMX7_CSI_VIDEO_MEM_LIMIT SZ_64M +#define IMX7_CSI_VIDEO_EOF_TIMEOUT 2000 + +#define IMX7_CSI_DEF_MBUS_CODE MEDIA_BUS_FMT_UYVY8_2X8 +#define IMX7_CSI_DEF_PIX_FORMAT V4L2_PIX_FMT_UYVY +#define IMX7_CSI_DEF_PIX_WIDTH 640 +#define IMX7_CSI_DEF_PIX_HEIGHT 480 + enum imx_csi_model { IMX7_CSI_IMX7 = 0, IMX7_CSI_IMX8MQ, }; +struct imx7_csi_pixfmt { + /* the in-memory FourCC pixel format */ + u32 fourcc; + /* + * the set of equivalent media bus codes for the fourcc. + * NOTE! codes pointer is NULL for in-memory-only formats. 
+ */ + const u32 *codes; + int bpp; /* total bpp */ + bool yuv; +}; + +struct imx7_csi_vb2_buffer { + struct vb2_v4l2_buffer vbuf; + struct list_head list; +}; + +static inline struct imx7_csi_vb2_buffer * +to_imx7_csi_vb2_buffer(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + return container_of(vbuf, struct imx7_csi_vb2_buffer, vbuf); +} + +struct imx7_csi_dma_buf { + void *virt; + dma_addr_t phys; + unsigned long len; +}; + struct imx7_csi { struct device *dev; - struct v4l2_subdev sd; - struct v4l2_async_notifier notifier; - struct imx_media_video_dev *vdev; - struct imx_media_dev *imxmd; - struct media_pad pad[IMX7_CSI_PADS_NUM]; - /* lock to protect members below */ - struct mutex lock; - /* lock to protect irq handler when stop streaming */ - spinlock_t irqlock; + /* Resources and locks */ + void __iomem *regbase; + int irq; + struct clk *mclk; + + struct mutex lock; /* Protects is_streaming, format_mbus, cc */ + spinlock_t irqlock; /* Protects last_eof */ + + /* Media and V4L2 device */ + struct media_device mdev; + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + struct media_pipeline pipe; struct v4l2_subdev *src_sd; + bool is_csi2; + + /* V4L2 subdev */ + struct v4l2_subdev sd; + struct media_pad pad[IMX7_CSI_PADS_NUM]; struct v4l2_mbus_framefmt format_mbus[IMX7_CSI_PADS_NUM]; - const struct imx_media_pixfmt *cc[IMX7_CSI_PADS_NUM]; - struct v4l2_fract frame_interval[IMX7_CSI_PADS_NUM]; + const struct imx7_csi_pixfmt *cc[IMX7_CSI_PADS_NUM]; - void __iomem *regbase; - int irq; - struct clk *mclk; + /* Video device */ + struct video_device *vdev; /* Video device */ + struct media_pad vdev_pad; /* Video device pad */ + + struct v4l2_pix_format vdev_fmt; /* The user format */ + const struct imx7_csi_pixfmt *vdev_cc; + struct v4l2_rect vdev_compose; /* The compose rectangle */ - /* active vb2 buffers to send to video dev sink */ - struct imx_media_buffer *active_vb2_buf[2]; - struct imx_media_dma_buf underrun_buf; + struct mutex vdev_mutex; /* Protect vdev operations */ + struct vb2_queue q; /* The videobuf2 queue */ + struct list_head ready_q; /* List of queued buffers */ + spinlock_t q_lock; /* Protect ready_q */ + + /* Buffers and streaming state */ + struct imx7_csi_vb2_buffer *active_vb2_buf[2]; + struct imx7_csi_dma_buf underrun_buf; + + bool is_streaming; int buf_num; u32 frame_sequence; bool last_eof; - bool is_streaming; - bool is_csi2; - struct completion last_eof_completion; enum imx_csi_model model; @@ -242,7 +298,8 @@ static void imx7_csi_init_default(struct imx7_csi *csi) imx7_csi_reg_write(csi, 0, CSI_CSICR2); imx7_csi_reg_write(csi, BIT_FRMCNT_RST, CSI_CSICR3); - imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(800) | BIT_IMAGE_HEIGHT(600), + imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(IMX7_CSI_DEF_PIX_WIDTH) | + BIT_IMAGE_HEIGHT(IMX7_CSI_DEF_PIX_HEIGHT), CSI_CSIIMAG_PARA); imx7_csi_reg_write(csi, BIT_DMA_REFLASH_RFF, CSI_CSICR3); @@ -336,16 +393,17 @@ static void imx7_csi_update_buf(struct imx7_csi *csi, dma_addr_t phys, imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB1); } +static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi); + static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi) { - struct imx_media_video_dev *vdev = csi->vdev; - struct imx_media_buffer *buf; + struct imx7_csi_vb2_buffer *buf; struct vb2_buffer *vb2_buf; dma_addr_t phys[2]; int i; for (i = 0; i < 2; i++) { - buf = imx_media_capture_device_next_buf(vdev); + buf = imx7_csi_video_next_buf(csi); if (buf) { 
csi->active_vb2_buf[i] = buf; vb2_buf = &buf->vbuf.vb2_buf; @@ -362,7 +420,7 @@ static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi) static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi, enum vb2_buffer_state return_status) { - struct imx_media_buffer *buf; + struct imx7_csi_vb2_buffer *buf; int i; /* return any remaining active frames with return_status */ @@ -378,13 +436,36 @@ static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi, } } +static void imx7_csi_free_dma_buf(struct imx7_csi *csi, + struct imx7_csi_dma_buf *buf) +{ + if (buf->virt) + dma_free_coherent(csi->dev, buf->len, buf->virt, buf->phys); + + buf->virt = NULL; + buf->phys = 0; +} + +static int imx7_csi_alloc_dma_buf(struct imx7_csi *csi, + struct imx7_csi_dma_buf *buf, int size) +{ + imx7_csi_free_dma_buf(csi, buf); + + buf->len = PAGE_ALIGN(size); + buf->virt = dma_alloc_coherent(csi->dev, buf->len, &buf->phys, + GFP_DMA | GFP_KERNEL); + if (!buf->virt) + return -ENOMEM; + + return 0; +} + static int imx7_csi_dma_setup(struct imx7_csi *csi) { - struct imx_media_video_dev *vdev = csi->vdev; int ret; - ret = imx_media_alloc_dma_buf(csi->dev, &csi->underrun_buf, - vdev->fmt.sizeimage); + ret = imx7_csi_alloc_dma_buf(csi, &csi->underrun_buf, + csi->vdev_fmt.sizeimage); if (ret < 0) { v4l2_warn(&csi->sd, "consider increasing the CMA area\n"); return ret; @@ -403,7 +484,7 @@ static void imx7_csi_dma_cleanup(struct imx7_csi *csi, enum vb2_buffer_state return_status) { imx7_csi_dma_unsetup_vb2_buf(csi, return_status); - imx_media_free_dma_buf(csi->dev, &csi->underrun_buf); + imx7_csi_free_dma_buf(csi, &csi->underrun_buf); } static void imx7_csi_dma_stop(struct imx7_csi *csi) @@ -420,7 +501,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi) /* * and then wait for interrupt handler to mark completion. */ - timeout_jiffies = msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT); + timeout_jiffies = msecs_to_jiffies(IMX7_CSI_VIDEO_EOF_TIMEOUT); ret = wait_for_completion_timeout(&csi->last_eof_completion, timeout_jiffies); if (ret == 0) @@ -431,8 +512,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi) static void imx7_csi_configure(struct imx7_csi *csi) { - struct imx_media_video_dev *vdev = csi->vdev; - struct v4l2_pix_format *out_pix = &vdev->fmt; + struct v4l2_pix_format *out_pix = &csi->vdev_fmt; int width = out_pix->width; u32 stride = 0; u32 cr3 = BIT_FRMCNT_RST; @@ -631,14 +711,13 @@ static void imx7_csi_error_recovery(struct imx7_csi *csi) static void imx7_csi_vb2_buf_done(struct imx7_csi *csi) { - struct imx_media_video_dev *vdev = csi->vdev; - struct imx_media_buffer *done, *next; + struct imx7_csi_vb2_buffer *done, *next; struct vb2_buffer *vb; dma_addr_t phys; done = csi->active_vb2_buf[csi->buf_num]; if (done) { - done->vbuf.field = vdev->fmt.field; + done->vbuf.field = csi->vdev_fmt.field; done->vbuf.sequence = csi->frame_sequence; vb = &done->vbuf.vb2_buf; vb->timestamp = ktime_get_ns(); @@ -647,7 +726,7 @@ static void imx7_csi_vb2_buf_done(struct imx7_csi *csi) csi->frame_sequence++; /* get next queued buffer */ - next = imx_media_capture_device_next_buf(vdev); + next = imx7_csi_video_next_buf(csi); if (next) { phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0); csi->active_vb2_buf[csi->buf_num] = next; @@ -718,6 +797,831 @@ static irqreturn_t imx7_csi_irq_handler(int irq, void *data) } /* ----------------------------------------------------------------------------- + * Format Helpers + */ + +#define IMX_BUS_FMTS(fmt...) 
(const u32[]) {fmt, 0} + +/* + * List of supported pixel formats for the subdevs. Keep V4L2_PIX_FMT_UYVY and + * MEDIA_BUS_FMT_UYVY8_2X8 first to match IMX7_CSI_DEF_PIX_FORMAT and + * IMX7_CSI_DEF_MBUS_CODE. + */ +static const struct imx7_csi_pixfmt pixel_formats[] = { + /*** YUV formats start here ***/ + { + .fourcc = V4L2_PIX_FMT_UYVY, + .codes = IMX_BUS_FMTS( + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_UYVY8_1X16 + ), + .yuv = true, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_YUYV, + .codes = IMX_BUS_FMTS( + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YUYV8_1X16 + ), + .yuv = true, + .bpp = 16, + }, + /*** raw bayer and grayscale formats start here ***/ + { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8), + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8), + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8), + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8), + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR10_1X10), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG10, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG10_1X10), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG10, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG10_1X10), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB10, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB10_1X10), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR12, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR12_1X12), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG12, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG12_1X12), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG12, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG12_1X12), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB12, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB12_1X12), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR14, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR14_1X14), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG14, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG14_1X14), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG14, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG14_1X14), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB14, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB14_1X14), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_GREY, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y8_1X8), + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_Y10, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_Y12, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12), + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_Y14, + .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y14_1X14), + .bpp = 16, + }, +}; + +/* + * Search in the pixel_formats[] array for an entry with the given fourcc + * return it. + */ +static const struct imx7_csi_pixfmt *imx7_csi_find_pixel_format(u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) { + const struct imx7_csi_pixfmt *fmt = &pixel_formats[i]; + + if (fmt->fourcc == fourcc) + return fmt; + } + + return NULL; +} + +/* + * Search in the pixel_formats[] array for an entry with the given media + * bus code and return it. 
+ */ +static const struct imx7_csi_pixfmt *imx7_csi_find_mbus_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) { + const struct imx7_csi_pixfmt *fmt = &pixel_formats[i]; + unsigned int j; + + if (!fmt->codes) + continue; + + for (j = 0; fmt->codes[j]; j++) { + if (code == fmt->codes[j]) + return fmt; + } + } + + return NULL; +} + +/* + * Enumerate entries in the pixel_formats[] array that match the + * requested search criteria. Return the media-bus code that matches + * the search criteria at the requested match index. + * + * @code: The returned media-bus code that matches the search criteria at + * the requested match index. + * @index: The requested match index. + */ +static int imx7_csi_enum_mbus_formats(u32 *code, u32 index) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) { + const struct imx7_csi_pixfmt *fmt = &pixel_formats[i]; + unsigned int j; + + if (!fmt->codes) + continue; + + for (j = 0; fmt->codes[j]; j++) { + if (index == 0) { + *code = fmt->codes[j]; + return 0; + } + + index--; + } + } + + return -EINVAL; +} + +static int imx7_csi_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix, + const struct v4l2_mbus_framefmt *mbus, + const struct imx7_csi_pixfmt *cc) +{ + u32 width; + u32 stride; + + if (!cc) { + cc = imx7_csi_find_mbus_format(mbus->code); + if (!cc) + return -EINVAL; + } + + /* Round up width for minimum burst size */ + width = round_up(mbus->width, 8); + + /* Round up stride for IDMAC line start address alignment */ + stride = round_up((width * cc->bpp) >> 3, 8); + + pix->width = width; + pix->height = mbus->height; + pix->pixelformat = cc->fourcc; + pix->colorspace = mbus->colorspace; + pix->xfer_func = mbus->xfer_func; + pix->ycbcr_enc = mbus->ycbcr_enc; + pix->quantization = mbus->quantization; + pix->field = mbus->field; + pix->bytesperline = stride; + pix->sizeimage = stride * pix->height; + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Video Capture Device - IOCTLs + */ + +static int imx7_csi_video_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct imx7_csi *csi = video_drvdata(file); + + strscpy(cap->driver, IMX7_CSI_VIDEO_NAME, sizeof(cap->driver)); + strscpy(cap->card, IMX7_CSI_VIDEO_NAME, sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), + "platform:%s", dev_name(csi->dev)); + + return 0; +} + +static int imx7_csi_video_enum_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + unsigned int index = f->index; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) { + const struct imx7_csi_pixfmt *fmt = &pixel_formats[i]; + + /* + * If a media bus code is specified, only consider formats that + * match it. + */ + if (f->mbus_code) { + unsigned int j; + + if (!fmt->codes) + continue; + + for (j = 0; fmt->codes[j]; j++) { + if (f->mbus_code == fmt->codes[j]) + break; + } + + if (!fmt->codes[j]) + continue; + } + + if (index == 0) { + f->pixelformat = fmt->fourcc; + return 0; + } + + index--; + } + + return -EINVAL; +} + +static int imx7_csi_video_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + const struct imx7_csi_pixfmt *cc; + + if (fsize->index > 0) + return -EINVAL; + + cc = imx7_csi_find_pixel_format(fsize->pixel_format); + if (!cc) + return -EINVAL; + + /* + * TODO: The constraints are hardware-specific and may depend on the + * pixel format. This should come from the driver using + * imx_media_capture. 
+ */ + fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; + fsize->stepwise.min_width = 1; + fsize->stepwise.max_width = 65535; + fsize->stepwise.min_height = 1; + fsize->stepwise.max_height = 65535; + fsize->stepwise.step_width = 1; + fsize->stepwise.step_height = 1; + + return 0; +} + +static int imx7_csi_video_g_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct imx7_csi *csi = video_drvdata(file); + + f->fmt.pix = csi->vdev_fmt; + + return 0; +} + +static const struct imx7_csi_pixfmt * +__imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt, + struct v4l2_rect *compose) +{ + struct v4l2_mbus_framefmt fmt_src; + const struct imx7_csi_pixfmt *cc; + + /* + * Find the pixel format, default to the first supported format if not + * found. + */ + cc = imx7_csi_find_pixel_format(pixfmt->pixelformat); + if (!cc) { + pixfmt->pixelformat = IMX7_CSI_DEF_PIX_FORMAT; + cc = imx7_csi_find_pixel_format(pixfmt->pixelformat); + } + + /* Allow IDMAC interweave but enforce field order from source. */ + if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) { + switch (pixfmt->field) { + case V4L2_FIELD_SEQ_TB: + pixfmt->field = V4L2_FIELD_INTERLACED_TB; + break; + case V4L2_FIELD_SEQ_BT: + pixfmt->field = V4L2_FIELD_INTERLACED_BT; + break; + default: + break; + } + } + + v4l2_fill_mbus_format(&fmt_src, pixfmt, 0); + imx7_csi_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc); + + if (compose) { + compose->width = fmt_src.width; + compose->height = fmt_src.height; + } + + return cc; +} + +static int imx7_csi_video_try_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + __imx7_csi_video_try_fmt(&f->fmt.pix, NULL); + return 0; +} + +static int imx7_csi_video_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct imx7_csi *csi = video_drvdata(file); + const struct imx7_csi_pixfmt *cc; + + if (vb2_is_busy(&csi->q)) { + dev_err(csi->dev, "%s queue busy\n", __func__); + return -EBUSY; + } + + cc = __imx7_csi_video_try_fmt(&f->fmt.pix, &csi->vdev_compose); + + csi->vdev_cc = cc; + csi->vdev_fmt = f->fmt.pix; + + return 0; +} + +static int imx7_csi_video_g_selection(struct file *file, void *fh, + struct v4l2_selection *s) +{ + struct imx7_csi *csi = video_drvdata(file); + + switch (s->target) { + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + /* The compose rectangle is fixed to the source format. */ + s->r = csi->vdev_compose; + break; + case V4L2_SEL_TGT_COMPOSE_PADDED: + /* + * The hardware writes with a configurable but fixed DMA burst + * size. If the source format width is not burst size aligned, + * the written frame contains padding to the right. 
+ */ + s->r.left = 0; + s->r.top = 0; + s->r.width = csi->vdev_fmt.width; + s->r.height = csi->vdev_fmt.height; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ioctl_ops imx7_csi_video_ioctl_ops = { + .vidioc_querycap = imx7_csi_video_querycap, + + .vidioc_enum_fmt_vid_cap = imx7_csi_video_enum_fmt_vid_cap, + .vidioc_enum_framesizes = imx7_csi_video_enum_framesizes, + + .vidioc_g_fmt_vid_cap = imx7_csi_video_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = imx7_csi_video_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = imx7_csi_video_s_fmt_vid_cap, + + .vidioc_g_selection = imx7_csi_video_g_selection, + + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, +}; + +/* ----------------------------------------------------------------------------- + * Video Capture Device - Queue Operations + */ + +static int imx7_csi_video_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, + unsigned int *nplanes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct imx7_csi *csi = vb2_get_drv_priv(vq); + struct v4l2_pix_format *pix = &csi->vdev_fmt; + unsigned int count = *nbuffers; + + if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + if (*nplanes) { + if (*nplanes != 1 || sizes[0] < pix->sizeimage) + return -EINVAL; + count += vq->num_buffers; + } + + count = min_t(__u32, IMX7_CSI_VIDEO_MEM_LIMIT / pix->sizeimage, count); + + if (*nplanes) + *nbuffers = (count < vq->num_buffers) ? 0 : + count - vq->num_buffers; + else + *nbuffers = count; + + *nplanes = 1; + sizes[0] = pix->sizeimage; + + return 0; +} + +static int imx7_csi_video_buf_init(struct vb2_buffer *vb) +{ + struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb); + + INIT_LIST_HEAD(&buf->list); + + return 0; +} + +static int imx7_csi_video_buf_prepare(struct vb2_buffer *vb) +{ + struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue); + struct v4l2_pix_format *pix = &csi->vdev_fmt; + + if (vb2_plane_size(vb, 0) < pix->sizeimage) { + dev_err(csi->dev, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, 0), (long)pix->sizeimage); + return -EINVAL; + } + + vb2_set_plane_payload(vb, 0, pix->sizeimage); + + return 0; +} + +static void imx7_csi_video_buf_queue(struct vb2_buffer *vb) +{ + struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue); + struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb); + unsigned long flags; + + spin_lock_irqsave(&csi->q_lock, flags); + + list_add_tail(&buf->list, &csi->ready_q); + + spin_unlock_irqrestore(&csi->q_lock, flags); +} + +static int imx7_csi_video_validate_fmt(struct imx7_csi *csi) +{ + struct v4l2_subdev_format fmt_src; + const struct imx7_csi_pixfmt *cc; + int ret; + + /* Retrieve the media bus format on the source subdev. */ + fmt_src.pad = IMX7_CSI_PAD_SRC; + fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; + ret = v4l2_subdev_call(&csi->sd, pad, get_fmt, NULL, &fmt_src); + if (ret) + return ret; + + /* + * Verify that the media bus size matches the size set on the video + * node. 
It is sufficient to check the compose rectangle size without + * checking the rounded size from pix_fmt, as the rounded size is + * derived directly from the compose rectangle size, and will thus + * always match if the compose rectangle matches. + */ + if (csi->vdev_compose.width != fmt_src.format.width || + csi->vdev_compose.height != fmt_src.format.height) + return -EPIPE; + + /* + * Verify that the media bus code is compatible with the pixel format + * set on the video node. + */ + cc = imx7_csi_find_mbus_format(fmt_src.format.code); + if (!cc || csi->vdev_cc->yuv != cc->yuv) + return -EPIPE; + + return 0; +} + +static int imx7_csi_video_start_streaming(struct vb2_queue *vq, + unsigned int count) +{ + struct imx7_csi *csi = vb2_get_drv_priv(vq); + struct imx7_csi_vb2_buffer *buf, *tmp; + unsigned long flags; + int ret; + + ret = imx7_csi_video_validate_fmt(csi); + if (ret) { + dev_err(csi->dev, "capture format not valid\n"); + goto err_buffers; + } + + mutex_lock(&csi->mdev.graph_mutex); + + ret = __media_pipeline_start(&csi->sd.entity, &csi->pipe); + if (ret) + goto err_unlock; + + ret = v4l2_subdev_call(&csi->sd, video, s_stream, 1); + if (ret) + goto err_stop; + + mutex_unlock(&csi->mdev.graph_mutex); + + return 0; + +err_stop: + __media_pipeline_stop(&csi->sd.entity); +err_unlock: + mutex_unlock(&csi->mdev.graph_mutex); + dev_err(csi->dev, "pipeline start failed with %d\n", ret); +err_buffers: + spin_lock_irqsave(&csi->q_lock, flags); + list_for_each_entry_safe(buf, tmp, &csi->ready_q, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&csi->q_lock, flags); + return ret; +} + +static void imx7_csi_video_stop_streaming(struct vb2_queue *vq) +{ + struct imx7_csi *csi = vb2_get_drv_priv(vq); + struct imx7_csi_vb2_buffer *frame; + struct imx7_csi_vb2_buffer *tmp; + unsigned long flags; + + mutex_lock(&csi->mdev.graph_mutex); + v4l2_subdev_call(&csi->sd, video, s_stream, 0); + __media_pipeline_stop(&csi->sd.entity); + mutex_unlock(&csi->mdev.graph_mutex); + + /* release all active buffers */ + spin_lock_irqsave(&csi->q_lock, flags); + list_for_each_entry_safe(frame, tmp, &csi->ready_q, list) { + list_del(&frame->list); + vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR); + } + spin_unlock_irqrestore(&csi->q_lock, flags); +} + +static const struct vb2_ops imx7_csi_video_qops = { + .queue_setup = imx7_csi_video_queue_setup, + .buf_init = imx7_csi_video_buf_init, + .buf_prepare = imx7_csi_video_buf_prepare, + .buf_queue = imx7_csi_video_buf_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .start_streaming = imx7_csi_video_start_streaming, + .stop_streaming = imx7_csi_video_stop_streaming, +}; + +/* ----------------------------------------------------------------------------- + * Video Capture Device - File Operations + */ + +static int imx7_csi_video_open(struct file *file) +{ + struct imx7_csi *csi = video_drvdata(file); + int ret; + + if (mutex_lock_interruptible(&csi->vdev_mutex)) + return -ERESTARTSYS; + + ret = v4l2_fh_open(file); + if (ret) { + dev_err(csi->dev, "v4l2_fh_open failed\n"); + goto out; + } + + ret = v4l2_pipeline_pm_get(&csi->vdev->entity); + if (ret) + v4l2_fh_release(file); + +out: + mutex_unlock(&csi->vdev_mutex); + return ret; +} + +static int imx7_csi_video_release(struct file *file) +{ + struct imx7_csi *csi = video_drvdata(file); + struct vb2_queue *vq = &csi->q; + + mutex_lock(&csi->vdev_mutex); + + if (file->private_data == vq->owner) { + 
vb2_queue_release(vq); + vq->owner = NULL; + } + + v4l2_pipeline_pm_put(&csi->vdev->entity); + + v4l2_fh_release(file); + mutex_unlock(&csi->vdev_mutex); + return 0; +} + +static const struct v4l2_file_operations imx7_csi_video_fops = { + .owner = THIS_MODULE, + .open = imx7_csi_video_open, + .release = imx7_csi_video_release, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, +}; + +/* ----------------------------------------------------------------------------- + * Video Capture Device - Init & Cleanup + */ + +static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi) +{ + struct imx7_csi_vb2_buffer *buf = NULL; + unsigned long flags; + + spin_lock_irqsave(&csi->q_lock, flags); + + /* get next queued buffer */ + if (!list_empty(&csi->ready_q)) { + buf = list_entry(csi->ready_q.next, struct imx7_csi_vb2_buffer, + list); + list_del(&buf->list); + } + + spin_unlock_irqrestore(&csi->q_lock, flags); + + return buf; +} + +static int imx7_csi_video_init_format(struct imx7_csi *csi) +{ + struct v4l2_subdev_format fmt_src = { + .pad = IMX7_CSI_PAD_SRC, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + fmt_src.format.code = IMX7_CSI_DEF_MBUS_CODE; + fmt_src.format.width = IMX7_CSI_DEF_PIX_WIDTH; + fmt_src.format.height = IMX7_CSI_DEF_PIX_HEIGHT; + + imx7_csi_mbus_fmt_to_pix_fmt(&csi->vdev_fmt, &fmt_src.format, NULL); + csi->vdev_compose.width = fmt_src.format.width; + csi->vdev_compose.height = fmt_src.format.height; + + csi->vdev_cc = imx7_csi_find_pixel_format(csi->vdev_fmt.pixelformat); + + return 0; +} + +static int imx7_csi_video_register(struct imx7_csi *csi) +{ + struct v4l2_subdev *sd = &csi->sd; + struct v4l2_device *v4l2_dev = sd->v4l2_dev; + struct video_device *vdev = csi->vdev; + int ret; + + vdev->v4l2_dev = v4l2_dev; + + /* Initialize the default format and compose rectangle. */ + ret = imx7_csi_video_init_format(csi); + if (ret < 0) + return ret; + + /* Register the video device. */ + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + if (ret) { + dev_err(csi->dev, "Failed to register video device\n"); + return ret; + } + + dev_info(csi->dev, "Registered %s as /dev/%s\n", vdev->name, + video_device_node_name(vdev)); + + /* Create the link from the CSI subdev to the video device. */ + ret = media_create_pad_link(&sd->entity, IMX7_CSI_PAD_SRC, + &vdev->entity, 0, MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_err(csi->dev, "failed to create link to device node\n"); + video_unregister_device(vdev); + return ret; + } + + return 0; +} + +static void imx7_csi_video_unregister(struct imx7_csi *csi) +{ + media_entity_cleanup(&csi->vdev->entity); + video_unregister_device(csi->vdev); +} + +static int imx7_csi_video_init(struct imx7_csi *csi) +{ + struct video_device *vdev; + struct vb2_queue *vq; + int ret; + + mutex_init(&csi->vdev_mutex); + INIT_LIST_HEAD(&csi->ready_q); + spin_lock_init(&csi->q_lock); + + /* Allocate and initialize the video device. 
*/ + vdev = video_device_alloc(); + if (!vdev) + return -ENOMEM; + + vdev->fops = &imx7_csi_video_fops; + vdev->ioctl_ops = &imx7_csi_video_ioctl_ops; + vdev->minor = -1; + vdev->release = video_device_release; + vdev->vfl_dir = VFL_DIR_RX; + vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING + | V4L2_CAP_IO_MC; + vdev->lock = &csi->vdev_mutex; + vdev->queue = &csi->q; + + snprintf(vdev->name, sizeof(vdev->name), "%s capture", csi->sd.name); + + video_set_drvdata(vdev, csi); + csi->vdev = vdev; + + /* Initialize the video device pad. */ + csi->vdev_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vdev->entity, 1, &csi->vdev_pad); + if (ret) { + video_device_release(vdev); + return ret; + } + + /* Initialize the vb2 queue. */ + vq = &csi->q; + vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vq->io_modes = VB2_MMAP | VB2_DMABUF; + vq->drv_priv = csi; + vq->buf_struct_size = sizeof(struct imx7_csi_vb2_buffer); + vq->ops = &imx7_csi_video_qops; + vq->mem_ops = &vb2_dma_contig_memops; + vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + vq->lock = &csi->vdev_mutex; + vq->min_buffers_needed = 2; + vq->dev = csi->dev; + + ret = vb2_queue_init(vq); + if (ret) { + dev_err(csi->dev, "vb2_queue_init failed\n"); + video_device_release(vdev); + return ret; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- * V4L2 Subdev Operations */ @@ -764,36 +1668,48 @@ out_unlock: return ret; } +static struct v4l2_mbus_framefmt * +imx7_csi_get_format(struct imx7_csi *csi, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad); + + return &csi->format_mbus[pad]; +} + static int imx7_csi_init_cfg(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state) { + const enum v4l2_subdev_format_whence which = + sd_state ? 
V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; struct imx7_csi *csi = v4l2_get_subdevdata(sd); - struct v4l2_mbus_framefmt *mf; - int ret; + const struct imx7_csi_pixfmt *cc; int i; + cc = imx7_csi_find_mbus_format(IMX7_CSI_DEF_MBUS_CODE); + for (i = 0; i < IMX7_CSI_PADS_NUM; i++) { - mf = v4l2_subdev_get_try_format(sd, sd_state, i); + struct v4l2_mbus_framefmt *mf = + imx7_csi_get_format(csi, sd_state, i, which); - ret = imx_media_init_mbus_fmt(mf, 800, 600, 0, V4L2_FIELD_NONE, - &csi->cc[i]); - if (ret < 0) - return ret; - } + mf->code = IMX7_CSI_DEF_MBUS_CODE; + mf->width = IMX7_CSI_DEF_PIX_WIDTH; + mf->height = IMX7_CSI_DEF_PIX_HEIGHT; + mf->field = V4L2_FIELD_NONE; - return 0; -} + mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mf->colorspace); + mf->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mf->colorspace); + mf->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(!cc->yuv, + mf->colorspace, mf->ycbcr_enc); -static struct v4l2_mbus_framefmt * -imx7_csi_get_format(struct imx7_csi *csi, - struct v4l2_subdev_state *sd_state, - unsigned int pad, - enum v4l2_subdev_format_whence which) -{ - if (which == V4L2_SUBDEV_FORMAT_TRY) - return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad); + csi->cc[i] = cc; + } - return &csi->format_mbus[pad]; + return 0; } static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd, @@ -811,8 +1727,7 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd, switch (code->pad) { case IMX7_CSI_PAD_SINK: - ret = imx_media_enum_mbus_formats(&code->code, code->index, - PIXFMT_SEL_ANY); + ret = imx7_csi_enum_mbus_formats(&code->code, code->index); break; case IMX7_CSI_PAD_SRC: if (code->index != 0) { @@ -857,12 +1772,58 @@ out_unlock: return ret; } +/* + * Default the colorspace in tryfmt to SRGB if set to an unsupported + * colorspace or not initialized. Then set the remaining colorimetry + * parameters based on the colorspace if they are uninitialized. + * + * tryfmt->code must be set on entry. 
+ */ +static void imx7_csi_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt) +{ + const struct imx7_csi_pixfmt *cc; + bool is_rgb = false; + + cc = imx7_csi_find_mbus_format(tryfmt->code); + if (cc && !cc->yuv) + is_rgb = true; + + switch (tryfmt->colorspace) { + case V4L2_COLORSPACE_SMPTE170M: + case V4L2_COLORSPACE_REC709: + case V4L2_COLORSPACE_JPEG: + case V4L2_COLORSPACE_SRGB: + case V4L2_COLORSPACE_BT2020: + case V4L2_COLORSPACE_OPRGB: + case V4L2_COLORSPACE_DCI_P3: + case V4L2_COLORSPACE_RAW: + break; + default: + tryfmt->colorspace = V4L2_COLORSPACE_SRGB; + break; + } + + if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT) + tryfmt->xfer_func = + V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace); + + if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) + tryfmt->ycbcr_enc = + V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace); + + if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT) + tryfmt->quantization = + V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, + tryfmt->colorspace, + tryfmt->ycbcr_enc); +} + static int imx7_csi_try_fmt(struct imx7_csi *csi, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *sdformat, - const struct imx_media_pixfmt **cc) + const struct imx7_csi_pixfmt **cc) { - const struct imx_media_pixfmt *in_cc; + const struct imx7_csi_pixfmt *in_cc; struct v4l2_mbus_framefmt *in_fmt; u32 code; @@ -873,8 +1834,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, switch (sdformat->pad) { case IMX7_CSI_PAD_SRC: - in_cc = imx_media_find_mbus_format(in_fmt->code, - PIXFMT_SEL_ANY); + in_cc = imx7_csi_find_mbus_format(in_fmt->code); sdformat->format.width = in_fmt->width; sdformat->format.height = in_fmt->height; @@ -888,14 +1848,11 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc; break; case IMX7_CSI_PAD_SINK: - *cc = imx_media_find_mbus_format(sdformat->format.code, - PIXFMT_SEL_ANY); + *cc = imx7_csi_find_mbus_format(sdformat->format.code); if (!*cc) { - imx_media_enum_mbus_formats(&code, 0, - PIXFMT_SEL_YUV_RGB); - *cc = imx_media_find_mbus_format(code, - PIXFMT_SEL_YUV_RGB); - sdformat->format.code = (*cc)->codes[0]; + code = IMX7_CSI_DEF_MBUS_CODE; + *cc = imx7_csi_find_mbus_format(code); + sdformat->format.code = code; } if (sdformat->format.field != V4L2_FIELD_INTERLACED) @@ -905,7 +1862,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, return -EINVAL; } - imx_media_try_colorimetry(&sdformat->format, false); + imx7_csi_try_colorimetry(&sdformat->format); return 0; } @@ -915,9 +1872,9 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_format *sdformat) { struct imx7_csi *csi = v4l2_get_subdevdata(sd); - const struct imx_media_pixfmt *outcc; + const struct imx7_csi_pixfmt *outcc; struct v4l2_mbus_framefmt *outfmt; - const struct imx_media_pixfmt *cc; + const struct imx7_csi_pixfmt *cc; struct v4l2_mbus_framefmt *fmt; struct v4l2_subdev_format format; int ret = 0; @@ -977,9 +1934,8 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd, struct v4l2_subdev_format *sink_fmt) { struct imx7_csi *csi = v4l2_get_subdevdata(sd); - struct imx_media_video_dev *vdev = csi->vdev; - const struct v4l2_pix_format *out_pix = &vdev->fmt; - struct media_pad *pad; + struct media_pad *pad = NULL; + unsigned int i; int ret; if (!csi->src_sd) @@ -1001,7 +1957,17 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd, case MEDIA_ENT_F_VID_MUX: /* The input is the mux, check its input. 
*/ - pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true); + for (i = 0; i < csi->src_sd->entity.num_pads; i++) { + struct media_pad *spad = &csi->src_sd->entity.pads[i]; + + if (!(spad->flags & MEDIA_PAD_FL_SINK)) + continue; + + pad = media_pad_remote_pad_first(spad); + if (pad) + break; + } + if (!pad) return -ENODEV; @@ -1017,29 +1983,6 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd, break; } - /* Validate the sink link, ensure the pixel format is supported. */ - switch (out_pix->pixelformat) { - case V4L2_PIX_FMT_UYVY: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_GREY: - case V4L2_PIX_FMT_Y10: - case V4L2_PIX_FMT_Y12: - case V4L2_PIX_FMT_SBGGR8: - case V4L2_PIX_FMT_SGBRG8: - case V4L2_PIX_FMT_SGRBG8: - case V4L2_PIX_FMT_SRGGB8: - case V4L2_PIX_FMT_SBGGR16: - case V4L2_PIX_FMT_SGBRG16: - case V4L2_PIX_FMT_SGRBG16: - case V4L2_PIX_FMT_SRGGB16: - break; - - default: - dev_dbg(csi->dev, "Invalid capture pixel format 0x%08x\n", - out_pix->pixelformat); - return -EINVAL; - } - return 0; } @@ -1047,31 +1990,27 @@ static int imx7_csi_registered(struct v4l2_subdev *sd) { struct imx7_csi *csi = v4l2_get_subdevdata(sd); int ret; - int i; - for (i = 0; i < IMX7_CSI_PADS_NUM; i++) { - /* set a default mbus format */ - ret = imx_media_init_mbus_fmt(&csi->format_mbus[i], - 800, 600, 0, V4L2_FIELD_NONE, - &csi->cc[i]); - if (ret < 0) - return ret; + ret = imx7_csi_video_init(csi); + if (ret) + return ret; - /* init default frame interval */ - csi->frame_interval[i].numerator = 1; - csi->frame_interval[i].denominator = 30; - } + ret = imx7_csi_video_register(csi); + if (ret) + return ret; - csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd, - IMX7_CSI_PAD_SRC, false); - if (IS_ERR(csi->vdev)) - return PTR_ERR(csi->vdev); + ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev); + if (ret) + goto err_unreg; - ret = imx_media_capture_device_register(csi->vdev, - MEDIA_LNK_FL_IMMUTABLE); + ret = media_device_register(&csi->mdev); if (ret) - imx_media_capture_device_remove(csi->vdev); + goto err_unreg; + + return 0; +err_unreg: + imx7_csi_video_unregister(csi); return ret; } @@ -1079,8 +2018,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd) { struct imx7_csi *csi = v4l2_get_subdevdata(sd); - imx_media_capture_device_unregister(csi->vdev); - imx_media_capture_device_remove(csi->vdev); + imx7_csi_video_unregister(csi); } static const struct v4l2_subdev_video_ops imx7_csi_video_ops = { @@ -1125,21 +2063,22 @@ static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier, struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier); struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK]; - /* - * If the subdev is a video mux, it must be one of the CSI - * muxes. Mark it as such via its group id. 
- */ - if (sd->entity.function == MEDIA_ENT_F_VID_MUX) - sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX; - csi->src_sd = sd; return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } +static int imx7_csi_notify_complete(struct v4l2_async_notifier *notifier) +{ + struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier); + + return v4l2_device_register_subdev_nodes(&csi->v4l2_dev); +} + static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = { .bound = imx7_csi_notify_bound, + .complete = imx7_csi_notify_complete, }; static int imx7_csi_async_register(struct imx7_csi *csi) @@ -1168,48 +2107,133 @@ static int imx7_csi_async_register(struct imx7_csi *csi) csi->notifier.ops = &imx7_csi_notify_ops; - ret = v4l2_async_subdev_nf_register(&csi->sd, &csi->notifier); + ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier); if (ret) return ret; - return v4l2_async_register_subdev(&csi->sd); + return 0; +} + +static void imx7_csi_media_cleanup(struct imx7_csi *csi) +{ + v4l2_device_unregister(&csi->v4l2_dev); + media_device_unregister(&csi->mdev); + media_device_cleanup(&csi->mdev); +} + +static const struct media_device_ops imx7_csi_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +static int imx7_csi_media_dev_init(struct imx7_csi *csi) +{ + int ret; + + strscpy(csi->mdev.model, "imx-media", sizeof(csi->mdev.model)); + csi->mdev.ops = &imx7_csi_media_ops; + csi->mdev.dev = csi->dev; + + csi->v4l2_dev.mdev = &csi->mdev; + strscpy(csi->v4l2_dev.name, "imx-media", + sizeof(csi->v4l2_dev.name)); + snprintf(csi->mdev.bus_info, sizeof(csi->mdev.bus_info), + "platform:%s", dev_name(csi->mdev.dev)); + + media_device_init(&csi->mdev); + + ret = v4l2_device_register(csi->dev, &csi->v4l2_dev); + if (ret < 0) { + v4l2_err(&csi->v4l2_dev, + "Failed to register v4l2_device: %d\n", ret); + goto cleanup; + } + + return 0; + +cleanup: + media_device_cleanup(&csi->mdev); + + return ret; +} + +static int imx7_csi_media_init(struct imx7_csi *csi) +{ + unsigned int i; + int ret; + + /* add media device */ + ret = imx7_csi_media_dev_init(csi); + if (ret) + return ret; + + v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops); + v4l2_set_subdevdata(&csi->sd, csi); + csi->sd.internal_ops = &imx7_csi_internal_ops; + csi->sd.entity.ops = &imx7_csi_entity_ops; + csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + csi->sd.dev = csi->dev; + csi->sd.owner = THIS_MODULE; + csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(csi->sd.name, sizeof(csi->sd.name), "csi"); + + for (i = 0; i < IMX7_CSI_PADS_NUM; i++) + csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ? + MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM, + csi->pad); + if (ret) + goto error; + + ret = v4l2_device_register_subdev(&csi->v4l2_dev, &csi->sd); + if (ret) + goto error; + + return 0; + +error: + imx7_csi_media_cleanup(csi); + return ret; } static int imx7_csi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; - struct imx_media_dev *imxmd; struct imx7_csi *csi; - int i, ret; + int ret; csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL); if (!csi) return -ENOMEM; csi->dev = dev; + platform_set_drvdata(pdev, csi); + + spin_lock_init(&csi->irqlock); + mutex_init(&csi->lock); + /* Acquire resources and install interrupt handler. 
*/ csi->mclk = devm_clk_get(&pdev->dev, "mclk"); if (IS_ERR(csi->mclk)) { ret = PTR_ERR(csi->mclk); dev_err(dev, "Failed to get mclk: %d", ret); - return ret; + goto destroy_mutex; } csi->irq = platform_get_irq(pdev, 0); - if (csi->irq < 0) - return csi->irq; + if (csi->irq < 0) { + ret = csi->irq; + goto destroy_mutex; + } csi->regbase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(csi->regbase)) - return PTR_ERR(csi->regbase); + if (IS_ERR(csi->regbase)) { + ret = PTR_ERR(csi->regbase); + goto destroy_mutex; + } csi->model = (enum imx_csi_model)(uintptr_t)of_device_get_match_data(&pdev->dev); - spin_lock_init(&csi->irqlock); - mutex_init(&csi->lock); - - /* install interrupt handler */ ret = devm_request_irq(dev, csi->irq, imx7_csi_irq_handler, 0, "csi", (void *)csi); if (ret < 0) { @@ -1217,42 +2241,15 @@ static int imx7_csi_probe(struct platform_device *pdev) goto destroy_mutex; } - /* add media device */ - imxmd = imx_media_dev_init(dev, NULL); - if (IS_ERR(imxmd)) { - ret = PTR_ERR(imxmd); + /* Initialize all the media device infrastructure. */ + ret = imx7_csi_media_init(csi); + if (ret) goto destroy_mutex; - } - platform_set_drvdata(pdev, &csi->sd); - - ret = imx_media_of_add_csi(imxmd, node); - if (ret < 0 && ret != -ENODEV && ret != -EEXIST) - goto cleanup; - - ret = imx_media_dev_notifier_register(imxmd, NULL); - if (ret < 0) - goto cleanup; - - csi->imxmd = imxmd; - v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops); - v4l2_set_subdevdata(&csi->sd, csi); - csi->sd.internal_ops = &imx7_csi_internal_ops; - csi->sd.entity.ops = &imx7_csi_entity_ops; - csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; - csi->sd.dev = &pdev->dev; - csi->sd.owner = THIS_MODULE; - csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; - csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI; - snprintf(csi->sd.name, sizeof(csi->sd.name), "csi"); - - for (i = 0; i < IMX7_CSI_PADS_NUM; i++) - csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ? - MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; - ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM, - csi->pad); - if (ret < 0) - goto cleanup; + /* Set the default mbus formats. 
*/ + ret = imx7_csi_init_cfg(&csi->sd, NULL); + if (ret) + goto media_cleanup; ret = imx7_csi_async_register(csi); if (ret) @@ -1263,13 +2260,8 @@ static int imx7_csi_probe(struct platform_device *pdev) subdev_notifier_cleanup: v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); - -cleanup: - v4l2_async_nf_unregister(&imxmd->notifier); - v4l2_async_nf_cleanup(&imxmd->notifier); - v4l2_device_unregister(&imxmd->v4l2_dev); - media_device_unregister(&imxmd->md); - media_device_cleanup(&imxmd->md); +media_cleanup: + imx7_csi_media_cleanup(csi); destroy_mutex: mutex_destroy(&csi->lock); @@ -1279,20 +2271,13 @@ destroy_mutex: static int imx7_csi_remove(struct platform_device *pdev) { - struct v4l2_subdev *sd = platform_get_drvdata(pdev); - struct imx7_csi *csi = v4l2_get_subdevdata(sd); - struct imx_media_dev *imxmd = csi->imxmd; - - v4l2_async_nf_unregister(&imxmd->notifier); - v4l2_async_nf_cleanup(&imxmd->notifier); + struct imx7_csi *csi = platform_get_drvdata(pdev); - media_device_unregister(&imxmd->md); - v4l2_device_unregister(&imxmd->v4l2_dev); - media_device_cleanup(&imxmd->md); + imx7_csi_media_cleanup(csi); v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); - v4l2_async_unregister_subdev(sd); + v4l2_async_unregister_subdev(&csi->sd); mutex_destroy(&csi->lock); diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index 68588e9dab0b..28aacda0f5a7 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -395,7 +395,7 @@ static int iss_pipeline_disable(struct iss_pipeline *pipe, if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; - pad = media_entity_remote_pad(pad); + pad = media_pad_remote_pad_first(pad); if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) break; @@ -464,7 +464,7 @@ static int iss_pipeline_enable(struct iss_pipeline *pipe, if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; - pad = media_entity_remote_pad(pad); + pad = media_pad_remote_pad_first(pad); if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) break; @@ -553,7 +553,7 @@ static int iss_pipeline_is_last(struct media_entity *me) pipe = to_iss_pipeline(me); if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED) return 0; - pad = media_entity_remote_pad(&pipe->output->pad); + pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; } diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c index 124ab2f44fbf..04ce0e7eb557 100644 --- a/drivers/staging/media/omap4iss/iss_csi2.c +++ b/drivers/staging/media/omap4iss/iss_csi2.c @@ -538,7 +538,7 @@ static int csi2_configure(struct iss_csi2_device *csi2) if (csi2->contexts[0].enabled || csi2->ctrl.if_enable) return -EBUSY; - pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]); + pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]); sensor = media_entity_to_v4l2_subdev(pad->entity); pdata = sensor->host_priv; diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index d0da083deed5..9512cd3314f2 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -190,7 +190,7 @@ iss_video_remote_subdev(struct iss_video *video, u32 *pad) { struct media_pad *remote; - remote = media_entity_remote_pad(&video->pad); + remote = media_pad_remote_pad_first(&video->pad); if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) return NULL; diff --git 
a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c index 2992fb87cf72..4af5a831bde0 100644 --- a/drivers/staging/media/rkvdec/rkvdec-h264.c +++ b/drivers/staging/media/rkvdec/rkvdec-h264.c @@ -109,7 +109,7 @@ struct rkvdec_h264_run { const struct v4l2_ctrl_h264_sps *sps; const struct v4l2_ctrl_h264_pps *pps; const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix; - int ref_buf_idx[V4L2_H264_NUM_DPB_ENTRIES]; + struct vb2_buffer *ref_buf[V4L2_H264_NUM_DPB_ENTRIES]; }; struct rkvdec_h264_ctx { @@ -742,17 +742,16 @@ static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx, struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb; struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; - int buf_idx = -1; + struct vb2_buffer *buf = NULL; if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) { - buf_idx = vb2_find_timestamp(cap_q, - dpb[i].reference_ts, 0); - if (buf_idx < 0) + buf = vb2_find_buffer(cap_q, dpb[i].reference_ts); + if (!buf) pr_debug("No buffer for reference_ts %llu", dpb[i].reference_ts); } - run->ref_buf_idx[i] = buf_idx; + run->ref_buf[i] = buf; } } @@ -805,7 +804,7 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx, if (WARN_ON(ref->index >= ARRAY_SIZE(dec_params->dpb))) continue; - dpb_valid = run->ref_buf_idx[ref->index] >= 0; + dpb_valid = run->ref_buf[ref->index] != NULL; bottom = ref->fields == V4L2_H264_BOTTOM_FIELD_REF; set_ps_field(hw_rps, DPB_INFO(i, j), @@ -881,24 +880,6 @@ static const u32 poc_reg_tbl_bottom_field[16] = { RKVDEC_REG_H264_POC_REFER2(1) }; -static struct vb2_buffer * -get_ref_buf(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run, - unsigned int dpb_idx) -{ - struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; - struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; - int buf_idx = run->ref_buf_idx[dpb_idx]; - - /* - * If a DPB entry is unused or invalid, address of current destination - * buffer is returned. - */ - if (buf_idx < 0) - return &run->base.bufs.dst->vb2_buf; - - return vb2_get_buffer(cap_q, buf_idx); -} - static void config_registers(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run) { @@ -971,8 +952,14 @@ static void config_registers(struct rkvdec_ctx *ctx, /* config ref pic address & poc */ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) { - struct vb2_buffer *vb_buf = get_ref_buf(ctx, run, i); - + struct vb2_buffer *vb_buf = run->ref_buf[i]; + + /* + * If a DPB entry is unused or invalid, address of current destination + * buffer is returned. + */ + if (!vb_buf) + vb_buf = &dst_buf->vb2_buf; refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0); if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c index 311a12656072..d8c1c0db15c7 100644 --- a/drivers/staging/media/rkvdec/rkvdec-vp9.c +++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c @@ -383,17 +383,17 @@ get_ref_buf(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp) { struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; - int buf_idx; + struct vb2_buffer *buf; /* * If a ref is unused or invalid, address of current destination * buffer is returned. 
*/ - buf_idx = vb2_find_timestamp(cap_q, timestamp, 0); - if (buf_idx < 0) - return vb2_to_rkvdec_decoded_buf(&dst->vb2_buf); + buf = vb2_find_buffer(cap_q, timestamp); + if (!buf) + buf = &dst->vb2_buf; - return vb2_to_rkvdec_decoded_buf(vb2_get_buffer(cap_q, buf_idx)); + return vb2_to_rkvdec_decoded_buf(buf); } static dma_addr_t get_mv_base_addr(struct rkvdec_decoded_buffer *buf) @@ -1015,7 +1015,6 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx) vp9_ctx->priv_tbl.size = sizeof(*priv_tbl); vp9_ctx->priv_tbl.cpu = priv_tbl; - memset(priv_tbl, 0, sizeof(*priv_tbl)); count_tbl = dma_alloc_coherent(rkvdec->dev, RKVDEC_VP9_COUNT_SIZE, &vp9_ctx->count_tbl.dma, GFP_KERNEL); @@ -1026,7 +1025,6 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx) vp9_ctx->count_tbl.size = RKVDEC_VP9_COUNT_SIZE; vp9_ctx->count_tbl.cpu = count_tbl; - memset(count_tbl, 0, sizeof(*count_tbl)); rkvdec_init_v4l2_vp9_count_tbl(ctx); return 0; diff --git a/drivers/staging/media/stkwebcam/Kconfig b/drivers/staging/media/stkwebcam/Kconfig new file mode 100644 index 000000000000..4450403dff41 --- /dev/null +++ b/drivers/staging/media/stkwebcam/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_STKWEBCAM + tristate "USB Syntek DC1125 Camera support (DEPRECATED)" + depends on VIDEO_DEV + depends on USB + help + Say Y here if you want to use this type of camera. + Supported devices are typically found in some Asus laptops, + with USB id 174f:a311 and 05e1:0501. Other Syntek cameras + may be supported by the stk11xx driver, from which this is + derived, see <http://sourceforge.net/projects/syntekdriver/> + + This driver is deprecated and is scheduled for removal by + the end of 2022. See the TODO file for more information. + + To compile this driver as a module, choose M here: the + module will be called stkwebcam. + diff --git a/drivers/staging/media/stkwebcam/Makefile b/drivers/staging/media/stkwebcam/Makefile new file mode 100644 index 000000000000..17ad7b6f43d0 --- /dev/null +++ b/drivers/staging/media/stkwebcam/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +stkwebcam-objs := stk-webcam.o stk-sensor.o + +obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam.o + diff --git a/drivers/staging/media/stkwebcam/TODO b/drivers/staging/media/stkwebcam/TODO new file mode 100644 index 000000000000..735304a72729 --- /dev/null +++ b/drivers/staging/media/stkwebcam/TODO @@ -0,0 +1,12 @@ +This is a very old driver for very old hardware (specifically +laptops that use this sensor). In addition according to reports +the picture quality is quite bad. + +This is also one of the few drivers still not using the vb2 +framework (or even the old videobuf framework!), so this driver +is now deprecated with the intent of removing it altogether by +the end of 2022. + +In order to keep this driver it has to be converted to vb2. +If someone is interested in doing this work, then contact the +linux-media mailinglist (https://linuxtv.org/lists.php). diff --git a/drivers/staging/media/stkwebcam/stk-sensor.c b/drivers/staging/media/stkwebcam/stk-sensor.c new file mode 100644 index 000000000000..94aa6a27f934 --- /dev/null +++ b/drivers/staging/media/stkwebcam/stk-sensor.c @@ -0,0 +1,587 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* stk-sensor.c: Driver for ov96xx sensor (used in some Syntek webcams) + * + * Copyright 2007-2008 Jaime Velasco Juan <jsagarribay@gmail.com> + * + * Some parts derived from ov7670.c: + * Copyright 2006 One Laptop Per Child Association, Inc. 
Written + * by Jonathan Corbet with substantial inspiration from Mark + * McClelland's ovcamchip code. + * + * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net> + * + * This file may be distributed under the terms of the GNU General + */ + +/* Controlling the sensor via the STK1125 vendor specific control interface: + * The camera uses an OmniVision sensor and the stk1125 provides an + * SCCB(i2c)-USB bridge which let us program the sensor. + * In my case the sensor id is 0x9652, it can be read from sensor's register + * 0x0A and 0x0B as follows: + * - read register #R: + * output #R to index 0x0208 + * output 0x0070 to index 0x0200 + * input 1 byte from index 0x0201 (some kind of status register) + * until its value is 0x01 + * input 1 byte from index 0x0209. This is the value of #R + * - write value V to register #R + * output #R to index 0x0204 + * output V to index 0x0205 + * output 0x0005 to index 0x0200 + * input 1 byte from index 0x0201 until its value becomes 0x04 + */ + +/* It seems the i2c bus is controlled with these registers */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "stk-webcam.h" + +#define STK_IIC_BASE (0x0200) +# define STK_IIC_OP (STK_IIC_BASE) +# define STK_IIC_OP_TX (0x05) +# define STK_IIC_OP_RX (0x70) +# define STK_IIC_STAT (STK_IIC_BASE+1) +# define STK_IIC_STAT_TX_OK (0x04) +# define STK_IIC_STAT_RX_OK (0x01) +/* I don't know what does this register. + * when it is 0x00 or 0x01, we cannot talk to the sensor, + * other values work */ +# define STK_IIC_ENABLE (STK_IIC_BASE+2) +# define STK_IIC_ENABLE_NO (0x00) +/* This is what the driver writes in windows */ +# define STK_IIC_ENABLE_YES (0x1e) +/* + * Address of the slave. Seems like the binary driver look for the + * sensor in multiple places, attempting a reset sequence. 
+ * We only know about the ov9650 + */ +# define STK_IIC_ADDR (STK_IIC_BASE+3) +# define STK_IIC_TX_INDEX (STK_IIC_BASE+4) +# define STK_IIC_TX_VALUE (STK_IIC_BASE+5) +# define STK_IIC_RX_INDEX (STK_IIC_BASE+8) +# define STK_IIC_RX_VALUE (STK_IIC_BASE+9) + +#define MAX_RETRIES (50) + +#define SENSOR_ADDRESS (0x60) + +/* From ov7670.c (These registers aren't fully accurate) */ + +/* Registers */ +#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */ +#define REG_COM1 0x04 /* Control 1 */ +#define COM1_CCIR656 0x40 /* CCIR656 enable */ +#define COM1_QFMT 0x20 /* QVGA/QCIF format */ +#define COM1_SKIP_0 0x00 /* Do not skip any row */ +#define COM1_SKIP_2 0x04 /* Skip 2 rows of 4 */ +#define COM1_SKIP_3 0x08 /* Skip 3 rows of 4 */ +#define REG_BAVE 0x05 /* U/B Average level */ +#define REG_GbAVE 0x06 /* Y/Gb Average level */ +#define REG_AECHH 0x07 /* AEC MS 5 bits */ +#define REG_RAVE 0x08 /* V/R Average level */ +#define REG_COM2 0x09 /* Control 2 */ +#define COM2_SSLEEP 0x10 /* Soft sleep mode */ +#define REG_PID 0x0a /* Product ID MSB */ +#define REG_VER 0x0b /* Product ID LSB */ +#define REG_COM3 0x0c /* Control 3 */ +#define COM3_SWAP 0x40 /* Byte swap */ +#define COM3_SCALEEN 0x08 /* Enable scaling */ +#define COM3_DCWEN 0x04 /* Enable downsamp/crop/window */ +#define REG_COM4 0x0d /* Control 4 */ +#define REG_COM5 0x0e /* All "reserved" */ +#define REG_COM6 0x0f /* Control 6 */ +#define REG_AECH 0x10 /* More bits of AEC value */ +#define REG_CLKRC 0x11 /* Clock control */ +#define CLK_PLL 0x80 /* Enable internal PLL */ +#define CLK_EXT 0x40 /* Use external clock directly */ +#define CLK_SCALE 0x3f /* Mask for internal clock scale */ +#define REG_COM7 0x12 /* Control 7 */ +#define COM7_RESET 0x80 /* Register reset */ +#define COM7_FMT_MASK 0x38 +#define COM7_FMT_SXGA 0x00 +#define COM7_FMT_VGA 0x40 +#define COM7_FMT_CIF 0x20 /* CIF format */ +#define COM7_FMT_QVGA 0x10 /* QVGA format */ +#define COM7_FMT_QCIF 0x08 /* QCIF format */ +#define COM7_RGB 0x04 /* bits 0 and 2 - RGB format */ +#define COM7_YUV 0x00 /* YUV */ +#define COM7_BAYER 0x01 /* Bayer format */ +#define COM7_PBAYER 0x05 /* "Processed bayer" */ +#define REG_COM8 0x13 /* Control 8 */ +#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */ +#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */ +#define COM8_BFILT 0x20 /* Band filter enable */ +#define COM8_AGC 0x04 /* Auto gain enable */ +#define COM8_AWB 0x02 /* White balance enable */ +#define COM8_AEC 0x01 /* Auto exposure enable */ +#define REG_COM9 0x14 /* Control 9 - gain ceiling */ +#define REG_COM10 0x15 /* Control 10 */ +#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */ +#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */ +#define COM10_HREF_REV 0x08 /* Reverse HREF */ +#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */ +#define COM10_VS_NEG 0x02 /* VSYNC negative */ +#define COM10_HS_NEG 0x01 /* HSYNC negative */ +#define REG_HSTART 0x17 /* Horiz start high bits */ +#define REG_HSTOP 0x18 /* Horiz stop high bits */ +#define REG_VSTART 0x19 /* Vert start high bits */ +#define REG_VSTOP 0x1a /* Vert stop high bits */ +#define REG_PSHFT 0x1b /* Pixel delay after HREF */ +#define REG_MIDH 0x1c /* Manuf. ID high */ +#define REG_MIDL 0x1d /* Manuf. 
ID low */ +#define REG_MVFP 0x1e /* Mirror / vflip */ +#define MVFP_MIRROR 0x20 /* Mirror image */ +#define MVFP_FLIP 0x10 /* Vertical flip */ + +#define REG_AEW 0x24 /* AGC upper limit */ +#define REG_AEB 0x25 /* AGC lower limit */ +#define REG_VPT 0x26 /* AGC/AEC fast mode op region */ +#define REG_ADVFL 0x2d /* Insert dummy lines (LSB) */ +#define REG_ADVFH 0x2e /* Insert dummy lines (MSB) */ +#define REG_HSYST 0x30 /* HSYNC rising edge delay */ +#define REG_HSYEN 0x31 /* HSYNC falling edge delay */ +#define REG_HREF 0x32 /* HREF pieces */ +#define REG_TSLB 0x3a /* lots of stuff */ +#define TSLB_YLAST 0x04 /* UYVY or VYUY - see com13 */ +#define TSLB_BYTEORD 0x08 /* swap bytes in 16bit mode? */ +#define REG_COM11 0x3b /* Control 11 */ +#define COM11_NIGHT 0x80 /* NIght mode enable */ +#define COM11_NMFR 0x60 /* Two bit NM frame rate */ +#define COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */ +#define COM11_50HZ 0x08 /* Manual 50Hz select */ +#define COM11_EXP 0x02 +#define REG_COM12 0x3c /* Control 12 */ +#define COM12_HREF 0x80 /* HREF always */ +#define REG_COM13 0x3d /* Control 13 */ +#define COM13_GAMMA 0x80 /* Gamma enable */ +#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */ +#define COM13_CMATRIX 0x10 /* Enable color matrix for RGB or YUV */ +#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */ +#define REG_COM14 0x3e /* Control 14 */ +#define COM14_DCWEN 0x10 /* DCW/PCLK-scale enable */ +#define REG_EDGE 0x3f /* Edge enhancement factor */ +#define REG_COM15 0x40 /* Control 15 */ +#define COM15_R10F0 0x00 /* Data range 10 to F0 */ +#define COM15_R01FE 0x80 /* 01 to FE */ +#define COM15_R00FF 0xc0 /* 00 to FF */ +#define COM15_RGB565 0x10 /* RGB565 output */ +#define COM15_RGBFIXME 0x20 /* FIXME */ +#define COM15_RGB555 0x30 /* RGB555 output */ +#define REG_COM16 0x41 /* Control 16 */ +#define COM16_AWBGAIN 0x08 /* AWB gain enable */ +#define REG_COM17 0x42 /* Control 17 */ +#define COM17_AECWIN 0xc0 /* AEC window - must match COM4 */ +#define COM17_CBAR 0x08 /* DSP Color bar */ + +/* + * This matrix defines how the colors are generated, must be + * tweaked to adjust hue and saturation. + * + * Order: v-red, v-green, v-blue, u-red, u-green, u-blue + * + * They are nine-bit signed quantities, with the sign bit + * stored in 0x58. Sign for v-red is bit 0, and up from there. 
+ */ +#define REG_CMATRIX_BASE 0x4f +#define CMATRIX_LEN 6 +#define REG_CMATRIX_SIGN 0x58 + + +#define REG_BRIGHT 0x55 /* Brightness */ +#define REG_CONTRAS 0x56 /* Contrast control */ + +#define REG_GFIX 0x69 /* Fix gain control */ + +#define REG_RGB444 0x8c /* RGB 444 control */ +#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */ +#define R444_RGBX 0x01 /* Empty nibble at end */ + +#define REG_HAECC1 0x9f /* Hist AEC/AGC control 1 */ +#define REG_HAECC2 0xa0 /* Hist AEC/AGC control 2 */ + +#define REG_BD50MAX 0xa5 /* 50hz banding step limit */ +#define REG_HAECC3 0xa6 /* Hist AEC/AGC control 3 */ +#define REG_HAECC4 0xa7 /* Hist AEC/AGC control 4 */ +#define REG_HAECC5 0xa8 /* Hist AEC/AGC control 5 */ +#define REG_HAECC6 0xa9 /* Hist AEC/AGC control 6 */ +#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */ +#define REG_BD60MAX 0xab /* 60hz banding step limit */ + + + + +/* Returns 0 if OK */ +static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val) +{ + int i = 0; + u8 tmpval = 0; + + if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg)) + return 1; + if (stk_camera_write_reg(dev, STK_IIC_TX_VALUE, val)) + return 1; + if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_TX)) + return 1; + do { + if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval)) + return 1; + i++; + } while (tmpval == 0 && i < MAX_RETRIES); + if (tmpval != STK_IIC_STAT_TX_OK) { + if (tmpval) + pr_err("stk_sensor_outb failed, status=0x%02x\n", + tmpval); + return 1; + } else + return 0; +} + +static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val) +{ + int i = 0; + u8 tmpval = 0; + + if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg)) + return 1; + if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_RX)) + return 1; + do { + if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval)) + return 1; + i++; + } while (tmpval == 0 && i < MAX_RETRIES); + if (tmpval != STK_IIC_STAT_RX_OK) { + if (tmpval) + pr_err("stk_sensor_inb failed, status=0x%02x\n", + tmpval); + return 1; + } + + if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval)) + return 1; + + *val = tmpval; + return 0; +} + +static int stk_sensor_write_regvals(struct stk_camera *dev, + struct regval *rv) +{ + int ret; + if (rv == NULL) + return 0; + while (rv->reg != 0xff || rv->val != 0xff) { + ret = stk_sensor_outb(dev, rv->reg, rv->val); + if (ret != 0) + return ret; + rv++; + } + return 0; +} + +int stk_sensor_sleep(struct stk_camera *dev) +{ + u8 tmp; + return stk_sensor_inb(dev, REG_COM2, &tmp) + || stk_sensor_outb(dev, REG_COM2, tmp|COM2_SSLEEP); +} + +int stk_sensor_wakeup(struct stk_camera *dev) +{ + u8 tmp; + return stk_sensor_inb(dev, REG_COM2, &tmp) + || stk_sensor_outb(dev, REG_COM2, tmp&~COM2_SSLEEP); +} + +static struct regval ov_initvals[] = { + {REG_CLKRC, CLK_PLL}, + {REG_COM11, 0x01}, + {0x6a, 0x7d}, + {REG_AECH, 0x40}, + {REG_GAIN, 0x00}, + {REG_BLUE, 0x80}, + {REG_RED, 0x80}, + /* Do not enable fast AEC for now */ + /*{REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC},*/ + {REG_COM8, COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC}, + {0x39, 0x50}, {0x38, 0x93}, + {0x37, 0x00}, {0x35, 0x81}, + {REG_COM5, 0x20}, + {REG_COM1, 0x00}, + {REG_COM3, 0x00}, + {REG_COM4, 0x00}, + {REG_PSHFT, 0x00}, + {0x16, 0x07}, + {0x33, 0xe2}, {0x34, 0xbf}, + {REG_COM16, 0x00}, + {0x96, 0x04}, + /* Gamma curve values */ +/* { 0x7a, 0x20 }, { 0x7b, 0x10 }, + { 0x7c, 0x1e }, { 0x7d, 0x35 }, + { 0x7e, 0x5a }, { 0x7f, 0x69 }, + { 0x80, 0x76 }, { 0x81, 0x80 }, + { 0x82, 0x88 }, { 0x83, 0x8f }, + { 0x84, 0x96 }, { 0x85, 0xa3 }, + { 
0x86, 0xaf }, { 0x87, 0xc4 }, + { 0x88, 0xd7 }, { 0x89, 0xe8 }, +*/ + {REG_GFIX, 0x40}, + {0x8e, 0x00}, + {REG_COM12, 0x73}, + {0x8f, 0xdf}, {0x8b, 0x06}, + {0x8c, 0x20}, + {0x94, 0x88}, {0x95, 0x88}, +/* {REG_COM15, 0xc1}, TODO */ + {0x29, 0x3f}, + {REG_COM6, 0x42}, + {REG_BD50MAX, 0x80}, + {REG_HAECC6, 0xb8}, {REG_HAECC7, 0x92}, + {REG_BD60MAX, 0x0a}, + {0x90, 0x00}, {0x91, 0x00}, + {REG_HAECC1, 0x00}, {REG_HAECC2, 0x00}, + {REG_AEW, 0x68}, {REG_AEB, 0x5c}, + {REG_VPT, 0xc3}, + {REG_COM9, 0x2e}, + {0x2a, 0x00}, {0x2b, 0x00}, + + {0xff, 0xff}, /* END MARKER */ +}; + +/* Probe the I2C bus and initialise the sensor chip */ +int stk_sensor_init(struct stk_camera *dev) +{ + u8 idl = 0; + u8 idh = 0; + + if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) + || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) + || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { + pr_err("Sensor resetting failed\n"); + return -ENODEV; + } + msleep(10); + /* Read the manufacturer ID: ov = 0x7FA2 */ + if (stk_sensor_inb(dev, REG_MIDH, &idh) + || stk_sensor_inb(dev, REG_MIDL, &idl)) { + pr_err("Strange error reading sensor ID\n"); + return -ENODEV; + } + if (idh != 0x7f || idl != 0xa2) { + pr_err("Huh? you don't have a sensor from ovt\n"); + return -ENODEV; + } + if (stk_sensor_inb(dev, REG_PID, &idh) + || stk_sensor_inb(dev, REG_VER, &idl)) { + pr_err("Could not read sensor model\n"); + return -ENODEV; + } + stk_sensor_write_regvals(dev, ov_initvals); + msleep(10); + pr_info("OmniVision sensor detected, id %02X%02X at address %x\n", + idh, idl, SENSOR_ADDRESS); + return 0; +} + +/* V4L2_PIX_FMT_UYVY */ +static struct regval ov_fmt_uyvy[] = { + {REG_TSLB, TSLB_YLAST|0x08 }, + { 0x4f, 0x80 }, /* "matrix coefficient 1" */ + { 0x50, 0x80 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x22 }, /* "matrix coefficient 4" */ + { 0x53, 0x5e }, /* "matrix coefficient 5" */ + { 0x54, 0x80 }, /* "matrix coefficient 6" */ + {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; +/* V4L2_PIX_FMT_YUYV */ +static struct regval ov_fmt_yuyv[] = { + {REG_TSLB, 0 }, + { 0x4f, 0x80 }, /* "matrix coefficient 1" */ + { 0x50, 0x80 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x22 }, /* "matrix coefficient 4" */ + { 0x53, 0x5e }, /* "matrix coefficient 5" */ + { 0x54, 0x80 }, /* "matrix coefficient 6" */ + {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; + +/* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb */ +static struct regval ov_fmt_rgbr[] = { + { REG_RGB444, 0 }, /* No RGB444 please */ + {REG_TSLB, 0x00}, + { REG_COM1, 0x0 }, + { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ + { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ + { 0x50, 0xb3 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x3d }, /* "matrix coefficient 4" */ + { 0x53, 0xa7 }, /* "matrix coefficient 5" */ + { 0x54, 0xe4 }, /* "matrix coefficient 6" */ + { REG_COM13, COM13_GAMMA }, + { REG_COM15, COM15_RGB565|COM15_R00FF }, + { 0xff, 0xff }, +}; + +/* V4L2_PIX_FMT_RGB565 gggbbbbb rrrrrggg */ +static struct regval ov_fmt_rgbp[] = { + { REG_RGB444, 0 }, /* No RGB444 please */ + {REG_TSLB, TSLB_BYTEORD }, + { REG_COM1, 0x0 }, + { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ + { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ + { 0x50, 0xb3 }, /* "matrix coefficient 2" */ + { 0x51, 0 }, /* vb */ + { 0x52, 0x3d }, /* "matrix coefficient 4" */ + { 0x53, 0xa7 }, /* "matrix 
coefficient 5" */ + { 0x54, 0xe4 }, /* "matrix coefficient 6" */ + { REG_COM13, COM13_GAMMA }, + { REG_COM15, COM15_RGB565|COM15_R00FF }, + { 0xff, 0xff }, +}; + +/* V4L2_PIX_FMT_SRGGB8 */ +static struct regval ov_fmt_bayer[] = { + /* This changes color order */ + {REG_TSLB, 0x40}, /* BGGR */ + /* {REG_TSLB, 0x08}, */ /* BGGR with vertical image flipping */ + {REG_COM15, COM15_R00FF }, + {0xff, 0xff}, /* END MARKER */ +}; +/* + * Store a set of start/stop values into the camera. + */ +static int stk_sensor_set_hw(struct stk_camera *dev, + int hstart, int hstop, int vstart, int vstop) +{ + int ret; + unsigned char v; +/* + * Horizontal: 11 bits, top 8 live in hstart and hstop. Bottom 3 of + * hstart are in href[2:0], bottom 3 of hstop in href[5:3]. There is + * a mystery "edge offset" value in the top two bits of href. + */ + ret = stk_sensor_outb(dev, REG_HSTART, (hstart >> 3) & 0xff); + ret += stk_sensor_outb(dev, REG_HSTOP, (hstop >> 3) & 0xff); + ret += stk_sensor_inb(dev, REG_HREF, &v); + v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x7); + msleep(10); + ret += stk_sensor_outb(dev, REG_HREF, v); +/* + * Vertical: similar arrangement (note: this is different from ov7670.c) + */ + ret += stk_sensor_outb(dev, REG_VSTART, (vstart >> 3) & 0xff); + ret += stk_sensor_outb(dev, REG_VSTOP, (vstop >> 3) & 0xff); + ret += stk_sensor_inb(dev, REG_VREF, &v); + v = (v & 0xc0) | ((vstop & 0x7) << 3) | (vstart & 0x7); + msleep(10); + ret += stk_sensor_outb(dev, REG_VREF, v); + return ret; +} + + +int stk_sensor_configure(struct stk_camera *dev) +{ + int com7; + /* + * We setup the sensor to output dummy lines in low-res modes, + * so we don't get absurdly hight framerates. + */ + unsigned dummylines; + int flip; + struct regval *rv; + + switch (dev->vsettings.mode) { + case MODE_QCIF: com7 = COM7_FMT_QCIF; + dummylines = 604; + break; + case MODE_QVGA: com7 = COM7_FMT_QVGA; + dummylines = 267; + break; + case MODE_CIF: com7 = COM7_FMT_CIF; + dummylines = 412; + break; + case MODE_VGA: com7 = COM7_FMT_VGA; + dummylines = 11; + break; + case MODE_SXGA: com7 = COM7_FMT_SXGA; + dummylines = 0; + break; + default: + pr_err("Unsupported mode %d\n", dev->vsettings.mode); + return -EFAULT; + } + switch (dev->vsettings.palette) { + case V4L2_PIX_FMT_UYVY: + com7 |= COM7_YUV; + rv = ov_fmt_uyvy; + break; + case V4L2_PIX_FMT_YUYV: + com7 |= COM7_YUV; + rv = ov_fmt_yuyv; + break; + case V4L2_PIX_FMT_RGB565: + com7 |= COM7_RGB; + rv = ov_fmt_rgbp; + break; + case V4L2_PIX_FMT_RGB565X: + com7 |= COM7_RGB; + rv = ov_fmt_rgbr; + break; + case V4L2_PIX_FMT_SBGGR8: + com7 |= COM7_PBAYER; + rv = ov_fmt_bayer; + break; + default: + pr_err("Unsupported colorspace\n"); + return -EFAULT; + } + /*FIXME sometimes the sensor go to a bad state + stk_sensor_write_regvals(dev, ov_initvals); */ + stk_sensor_outb(dev, REG_COM7, com7); + msleep(50); + stk_sensor_write_regvals(dev, rv); + flip = (dev->vsettings.vflip?MVFP_FLIP:0) + | (dev->vsettings.hflip?MVFP_MIRROR:0); + stk_sensor_outb(dev, REG_MVFP, flip); + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8 + && !dev->vsettings.vflip) + stk_sensor_outb(dev, REG_TSLB, 0x08); + stk_sensor_outb(dev, REG_ADVFH, dummylines >> 8); + stk_sensor_outb(dev, REG_ADVFL, dummylines & 0xff); + msleep(50); + switch (dev->vsettings.mode) { + case MODE_VGA: + if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) + pr_err("stk_sensor_set_hw failed (VGA)\n"); + break; + case MODE_SXGA: + case MODE_CIF: + case MODE_QVGA: + case MODE_QCIF: + /*FIXME These settings seem ignored by the sensor + if 
(stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) + pr_err("stk_sensor_set_hw failed (SXGA)\n"); + */ + break; + } + msleep(10); + return 0; +} + +int stk_sensor_set_brightness(struct stk_camera *dev, int br) +{ + if (br < 0 || br > 0xff) + return -EINVAL; + stk_sensor_outb(dev, REG_AEB, max(0x00, br - 6)); + stk_sensor_outb(dev, REG_AEW, min(0xff, br + 6)); + return 0; +} + diff --git a/drivers/staging/media/stkwebcam/stk-webcam.c b/drivers/staging/media/stkwebcam/stk-webcam.c new file mode 100644 index 000000000000..787edb3d47c2 --- /dev/null +++ b/drivers/staging/media/stkwebcam/stk-webcam.c @@ -0,0 +1,1434 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * stk-webcam.c : Driver for Syntek 1125 USB webcam controller + * + * Copyright (C) 2006 Nicolas VIVIEN + * Copyright 2007-2008 Jaime Velasco Juan <jsagarribay@gmail.com> + * + * Some parts are inspired from cafe_ccic.c + * Copyright 2006-2007 Jonathan Corbet + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/slab.h> + +#include <linux/dmi.h> +#include <linux/usb.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/videodev2.h> +#include <media/v4l2-common.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> + +#include "stk-webcam.h" + + +static int hflip = -1; +module_param(hflip, int, 0444); +MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0"); + +static int vflip = -1; +module_param(vflip, int, 0444); +MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0"); + +static int debug; +module_param(debug, int, 0444); +MODULE_PARM_DESC(debug, "Debug v4l ioctls. Defaults to 0"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jaime Velasco Juan <jsagarribay@gmail.com> and Nicolas VIVIEN"); +MODULE_DESCRIPTION("Syntek DC1125 webcam driver"); + +/* Some cameras have audio interfaces, we aren't interested in those */ +static const struct usb_device_id stkwebcam_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x05e1, 0x0501, 0xff, 0xff, 0xff) }, + { } +}; +MODULE_DEVICE_TABLE(usb, stkwebcam_table); + +/* + * The stk webcam laptop module is mounted upside down in some laptops :( + * + * Some background information (thanks to Hans de Goede for providing this): + * + * 1) Once upon a time the stkwebcam driver was written + * + * 2) The webcam in question was used mostly in Asus laptop models, including + * the laptop of the original author of the driver, and in these models, in + * typical Asus fashion (see the long long list for uvc cams inside v4l-utils), + * they mounted the webcam-module the wrong way up. So the hflip and vflip + * module options were given a default value of 1 (the correct value for + * upside down mounted models) + * + * 3) Years later I got a bug report from a user with a laptop with stkwebcam, + * where the module was actually mounted the right way up, and thus showed + * upside down under Linux. So now I was facing the choice of 2 options: + * + * a) Add a not-upside-down list to stkwebcam, which overrules the default. + * + * b) Do it like all the other drivers do, and make the default right for + * cams mounted the proper way and add an upside-down model list, with + * models where we need to flip-by-default. 
+ * + * Despite knowing that going b) would cause a period of pain where we were + * building the table I opted to go for option b), since a) is just too ugly, + * and worse different from how every other driver does it leading to + * confusion in the long run. This change was made in kernel 3.6. + * + * So for any user report about upside-down images since kernel 3.6 ask them + * to provide the output of 'sudo dmidecode' so the laptop can be added in + * the table below. + */ +static const struct dmi_system_id stk_upside_down_dmi_table[] = { + { + .ident = "ASUS G1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "G1") + } + }, { + .ident = "ASUS F3JC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") + } + }, + { + .ident = "T12Rg-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"), + DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H") + } + }, + { + .ident = "ASUS A6VM", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "A6VM") + } + }, + { + .ident = "ASUS A6JC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "A6JC") + } + }, + {} +}; + + +/* + * Basic stuff + */ +int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value) +{ + struct usb_device *udev = dev->udev; + int ret; + + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + 0x01, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + value, + index, + NULL, + 0, + 500); + if (ret < 0) + return ret; + else + return 0; +} + +int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value) +{ + struct usb_device *udev = dev->udev; + int ret; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + 0x00, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x00, + index, + &dev->read_reg_scratch, + sizeof(u8), + 500); + if (ret >= 0) + *value = dev->read_reg_scratch; + + if (ret < 0) + return ret; + else + return 0; +} + +static int stk_start_stream(struct stk_camera *dev) +{ + u8 value; + int i, ret; + u8 value_116, value_117; + + + if (!is_present(dev)) + return -ENODEV; + if (!is_memallocd(dev) || !is_initialised(dev)) { + pr_err("FIXME: Buffers are not allocated\n"); + return -EFAULT; + } + ret = usb_set_interface(dev->udev, 0, 5); + + if (ret < 0) + pr_err("usb_set_interface failed !\n"); + if (stk_sensor_wakeup(dev)) + pr_err("error awaking the sensor\n"); + + stk_camera_read_reg(dev, 0x0116, &value_116); + stk_camera_read_reg(dev, 0x0117, &value_117); + + stk_camera_write_reg(dev, 0x0116, 0x0000); + stk_camera_write_reg(dev, 0x0117, 0x0000); + + stk_camera_read_reg(dev, 0x0100, &value); + stk_camera_write_reg(dev, 0x0100, value | 0x80); + + stk_camera_write_reg(dev, 0x0116, value_116); + stk_camera_write_reg(dev, 0x0117, value_117); + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].urb) { + ret = usb_submit_urb(dev->isobufs[i].urb, GFP_KERNEL); + atomic_inc(&dev->urbs_used); + if (ret) + return ret; + } + } + set_streaming(dev); + return 0; +} + +static int stk_stop_stream(struct stk_camera *dev) +{ + u8 value; + int i; + if (is_present(dev)) { + stk_camera_read_reg(dev, 0x0100, &value); + stk_camera_write_reg(dev, 0x0100, value & ~0x80); + if (dev->isobufs != NULL) { + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].urb) + usb_kill_urb(dev->isobufs[i].urb); + } + } + unset_streaming(dev); + + if (usb_set_interface(dev->udev, 0, 0)) + pr_err("usb_set_interface failed 
!\n"); + if (stk_sensor_sleep(dev)) + pr_err("error suspending the sensor\n"); + } + return 0; +} + +/* + * This seems to be the shortest init sequence we + * must do in order to find the sensor + * Bit 5 of reg. 0x0000 here is important, when reset to 0 the sensor + * is also reset. Maybe powers down it? + * Rest of values don't make a difference + */ + +static struct regval stk1125_initvals[] = { + /*TODO: What means this sequence? */ + {0x0000, 0x24}, + {0x0100, 0x21}, + {0x0002, 0x68}, + {0x0003, 0x80}, + {0x0005, 0x00}, + {0x0007, 0x03}, + {0x000d, 0x00}, + {0x000f, 0x02}, + {0x0300, 0x12}, + {0x0350, 0x41}, + {0x0351, 0x00}, + {0x0352, 0x00}, + {0x0353, 0x00}, + {0x0018, 0x10}, + {0x0019, 0x00}, + {0x001b, 0x0e}, + {0x001c, 0x46}, + {0x0300, 0x80}, + {0x001a, 0x04}, + {0x0110, 0x00}, + {0x0111, 0x00}, + {0x0112, 0x00}, + {0x0113, 0x00}, + + {0xffff, 0xff}, +}; + + +static int stk_initialise(struct stk_camera *dev) +{ + struct regval *rv; + int ret; + if (!is_present(dev)) + return -ENODEV; + if (is_initialised(dev)) + return 0; + rv = stk1125_initvals; + while (rv->reg != 0xffff) { + ret = stk_camera_write_reg(dev, rv->reg, rv->val); + if (ret) + return ret; + rv++; + } + if (stk_sensor_init(dev) == 0) { + set_initialised(dev); + return 0; + } else + return -1; +} + +/* *********************************************** */ +/* + * This function is called as an URB transfert is complete (Isochronous pipe). + * So, the traitement is done in interrupt time, so it has be fast, not crash, + * and not stall. Neat. + */ +static void stk_isoc_handler(struct urb *urb) +{ + int i; + int ret; + int framelen; + unsigned long flags; + + unsigned char *fill = NULL; + unsigned char *iso_buf = NULL; + + struct stk_camera *dev; + struct stk_sio_buffer *fb; + + dev = (struct stk_camera *) urb->context; + + if (dev == NULL) { + pr_err("isoc_handler called with NULL device !\n"); + return; + } + + if (urb->status == -ENOENT || urb->status == -ECONNRESET + || urb->status == -ESHUTDOWN) { + atomic_dec(&dev->urbs_used); + return; + } + + spin_lock_irqsave(&dev->spinlock, flags); + + if (urb->status != -EINPROGRESS && urb->status != 0) { + pr_err("isoc_handler: urb->status == %d\n", urb->status); + goto resubmit; + } + + if (list_empty(&dev->sio_avail)) { + /*FIXME Stop streaming after a while */ + pr_err_ratelimited("isoc_handler without available buffer!\n"); + goto resubmit; + } + fb = list_first_entry(&dev->sio_avail, + struct stk_sio_buffer, list); + fill = fb->buffer + fb->v4lbuf.bytesused; + + for (i = 0; i < urb->number_of_packets; i++) { + if (urb->iso_frame_desc[i].status != 0) { + if (urb->iso_frame_desc[i].status != -EXDEV) + pr_err("Frame %d has error %d\n", + i, urb->iso_frame_desc[i].status); + continue; + } + framelen = urb->iso_frame_desc[i].actual_length; + iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset; + + if (framelen <= 4) + continue; /* no data */ + + /* + * we found something informational from there + * the isoc frames have to type of headers + * type1: 00 xx 00 00 or 20 xx 00 00 + * type2: 80 xx 00 00 00 00 00 00 or a0 xx 00 00 00 00 00 00 + * xx is a sequencer which has never been seen over 0x3f + * imho data written down looks like bayer, i see similarities + * after every 640 bytes + */ + if (*iso_buf & 0x80) { + framelen -= 8; + iso_buf += 8; + /* This marks a new frame */ + if (fb->v4lbuf.bytesused != 0 + && fb->v4lbuf.bytesused != dev->frame_size) { + pr_err_ratelimited("frame %d, bytesused=%d, skipping\n", + i, fb->v4lbuf.bytesused); + fb->v4lbuf.bytesused = 0; 
+ fill = fb->buffer; + } else if (fb->v4lbuf.bytesused == dev->frame_size) { + if (list_is_singular(&dev->sio_avail)) { + /* Always reuse the last buffer */ + fb->v4lbuf.bytesused = 0; + fill = fb->buffer; + } else { + list_move_tail(dev->sio_avail.next, + &dev->sio_full); + wake_up(&dev->wait_frame); + fb = list_first_entry(&dev->sio_avail, + struct stk_sio_buffer, list); + fb->v4lbuf.bytesused = 0; + fill = fb->buffer; + } + } + } else { + framelen -= 4; + iso_buf += 4; + } + + /* Our buffer is full !!! */ + if (framelen + fb->v4lbuf.bytesused > dev->frame_size) { + pr_err_ratelimited("Frame buffer overflow, lost sync\n"); + /*FIXME Do something here? */ + continue; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + memcpy(fill, iso_buf, framelen); + spin_lock_irqsave(&dev->spinlock, flags); + fill += framelen; + + /* New size of our buffer */ + fb->v4lbuf.bytesused += framelen; + } + +resubmit: + spin_unlock_irqrestore(&dev->spinlock, flags); + urb->dev = dev->udev; + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret != 0) { + pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n", + ret); + } +} + +/* -------------------------------------------- */ + +static int stk_prepare_iso(struct stk_camera *dev) +{ + void *kbuf; + int i, j; + struct urb *urb; + struct usb_device *udev; + + if (dev == NULL) + return -ENXIO; + udev = dev->udev; + + if (dev->isobufs) + pr_err("isobufs already allocated. Bad\n"); + else + dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs), + GFP_KERNEL); + if (dev->isobufs == NULL) { + pr_err("Unable to allocate iso buffers\n"); + return -ENOMEM; + } + for (i = 0; i < MAX_ISO_BUFS; i++) { + if (dev->isobufs[i].data == NULL) { + kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL); + if (kbuf == NULL) { + pr_err("Failed to allocate iso buffer %d\n", i); + goto isobufs_out; + } + dev->isobufs[i].data = kbuf; + } else + pr_err("isobuf data already allocated\n"); + if (dev->isobufs[i].urb == NULL) { + urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); + if (urb == NULL) + goto isobufs_out; + dev->isobufs[i].urb = urb; + } else { + pr_err("Killing URB\n"); + usb_kill_urb(dev->isobufs[i].urb); + urb = dev->isobufs[i].urb; + } + urb->interval = 1; + urb->dev = udev; + urb->pipe = usb_rcvisocpipe(udev, dev->isoc_ep); + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = dev->isobufs[i].data; + urb->transfer_buffer_length = ISO_BUFFER_SIZE; + urb->complete = stk_isoc_handler; + urb->context = dev; + urb->start_frame = 0; + urb->number_of_packets = ISO_FRAMES_PER_DESC; + + for (j = 0; j < ISO_FRAMES_PER_DESC; j++) { + urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE; + urb->iso_frame_desc[j].length = ISO_MAX_FRAME_SIZE; + } + } + set_memallocd(dev); + return 0; + +isobufs_out: + for (i = 0; i < MAX_ISO_BUFS && dev->isobufs[i].data; i++) + kfree(dev->isobufs[i].data); + for (i = 0; i < MAX_ISO_BUFS && dev->isobufs[i].urb; i++) + usb_free_urb(dev->isobufs[i].urb); + kfree(dev->isobufs); + dev->isobufs = NULL; + return -ENOMEM; +} + +static void stk_clean_iso(struct stk_camera *dev) +{ + int i; + + if (dev == NULL || dev->isobufs == NULL) + return; + + for (i = 0; i < MAX_ISO_BUFS; i++) { + struct urb *urb; + + urb = dev->isobufs[i].urb; + if (urb) { + if (atomic_read(&dev->urbs_used) && is_present(dev)) + usb_kill_urb(urb); + usb_free_urb(urb); + } + kfree(dev->isobufs[i].data); + } + kfree(dev->isobufs); + dev->isobufs = NULL; + unset_memallocd(dev); +} + +static int stk_setup_siobuf(struct stk_camera *dev, int index) +{ + struct stk_sio_buffer *buf = 
dev->sio_bufs + index; + INIT_LIST_HEAD(&buf->list); + buf->v4lbuf.length = PAGE_ALIGN(dev->frame_size); + buf->buffer = vmalloc_user(buf->v4lbuf.length); + if (buf->buffer == NULL) + return -ENOMEM; + buf->mapcount = 0; + buf->dev = dev; + buf->v4lbuf.index = index; + buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + buf->v4lbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + buf->v4lbuf.field = V4L2_FIELD_NONE; + buf->v4lbuf.memory = V4L2_MEMORY_MMAP; + buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length; + return 0; +} + +static int stk_free_sio_buffers(struct stk_camera *dev) +{ + int i; + int nbufs; + unsigned long flags; + if (dev->n_sbufs == 0 || dev->sio_bufs == NULL) + return 0; + /* + * If any buffers are mapped, we cannot free them at all. + */ + for (i = 0; i < dev->n_sbufs; i++) { + if (dev->sio_bufs[i].mapcount > 0) + return -EBUSY; + } + /* + * OK, let's do it. + */ + spin_lock_irqsave(&dev->spinlock, flags); + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + nbufs = dev->n_sbufs; + dev->n_sbufs = 0; + spin_unlock_irqrestore(&dev->spinlock, flags); + for (i = 0; i < nbufs; i++) + vfree(dev->sio_bufs[i].buffer); + kfree(dev->sio_bufs); + dev->sio_bufs = NULL; + return 0; +} + +static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs) +{ + int i; + if (dev->sio_bufs != NULL) + pr_err("sio_bufs already allocated\n"); + else { + dev->sio_bufs = kcalloc(n_sbufs, + sizeof(struct stk_sio_buffer), + GFP_KERNEL); + if (dev->sio_bufs == NULL) + return -ENOMEM; + for (i = 0; i < n_sbufs; i++) { + if (stk_setup_siobuf(dev, i)) + return (dev->n_sbufs > 1 ? 0 : -ENOMEM); + dev->n_sbufs = i+1; + } + } + return 0; +} + +static int stk_allocate_buffers(struct stk_camera *dev, unsigned n_sbufs) +{ + int err; + err = stk_prepare_iso(dev); + if (err) { + stk_clean_iso(dev); + return err; + } + err = stk_prepare_sio_buffers(dev, n_sbufs); + if (err) { + stk_free_sio_buffers(dev); + return err; + } + return 0; +} + +static void stk_free_buffers(struct stk_camera *dev) +{ + stk_clean_iso(dev); + stk_free_sio_buffers(dev); +} +/* -------------------------------------------- */ + +/* v4l file operations */ + +static int v4l_stk_open(struct file *fp) +{ + struct stk_camera *dev = video_drvdata(fp); + int err; + + if (dev == NULL || !is_present(dev)) + return -ENXIO; + + if (mutex_lock_interruptible(&dev->lock)) + return -ERESTARTSYS; + if (!dev->first_init) + stk_camera_write_reg(dev, 0x0, 0x24); + else + dev->first_init = 0; + + err = v4l2_fh_open(fp); + if (!err) + usb_autopm_get_interface(dev->interface); + mutex_unlock(&dev->lock); + return err; +} + +static int v4l_stk_release(struct file *fp) +{ + struct stk_camera *dev = video_drvdata(fp); + + mutex_lock(&dev->lock); + if (dev->owner == fp) { + stk_stop_stream(dev); + stk_free_buffers(dev); + stk_camera_write_reg(dev, 0x0, 0x49); /* turn off the LED */ + unset_initialised(dev); + dev->owner = NULL; + } + + usb_autopm_put_interface(dev->interface); + mutex_unlock(&dev->lock); + return v4l2_fh_release(fp); +} + +static ssize_t stk_read(struct file *fp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int i; + int ret; + unsigned long flags; + struct stk_sio_buffer *sbuf; + struct stk_camera *dev = video_drvdata(fp); + + if (!is_present(dev)) + return -EIO; + if (dev->owner && (!dev->reading || dev->owner != fp)) + return -EBUSY; + dev->owner = fp; + if (!is_streaming(dev)) { + if (stk_initialise(dev) + || stk_allocate_buffers(dev, 3) + || stk_start_stream(dev)) + return -ENOMEM; + dev->reading = 1; + 
spin_lock_irqsave(&dev->spinlock, flags); + for (i = 0; i < dev->n_sbufs; i++) { + list_add_tail(&dev->sio_bufs[i].list, &dev->sio_avail); + dev->sio_bufs[i].v4lbuf.flags = V4L2_BUF_FLAG_QUEUED; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + } + if (*f_pos == 0) { + if (fp->f_flags & O_NONBLOCK && list_empty(&dev->sio_full)) + return -EWOULDBLOCK; + ret = wait_event_interruptible(dev->wait_frame, + !list_empty(&dev->sio_full) || !is_present(dev)); + if (ret) + return ret; + if (!is_present(dev)) + return -EIO; + } + if (count + *f_pos > dev->frame_size) + count = dev->frame_size - *f_pos; + spin_lock_irqsave(&dev->spinlock, flags); + if (list_empty(&dev->sio_full)) { + spin_unlock_irqrestore(&dev->spinlock, flags); + pr_err("BUG: No siobufs ready\n"); + return 0; + } + sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); + spin_unlock_irqrestore(&dev->spinlock, flags); + + if (copy_to_user(buf, sbuf->buffer + *f_pos, count)) + return -EFAULT; + + *f_pos += count; + + if (*f_pos >= dev->frame_size) { + *f_pos = 0; + spin_lock_irqsave(&dev->spinlock, flags); + list_move_tail(&sbuf->list, &dev->sio_avail); + spin_unlock_irqrestore(&dev->spinlock, flags); + } + return count; +} + +static ssize_t v4l_stk_read(struct file *fp, char __user *buf, + size_t count, loff_t *f_pos) +{ + struct stk_camera *dev = video_drvdata(fp); + int ret; + + if (mutex_lock_interruptible(&dev->lock)) + return -ERESTARTSYS; + ret = stk_read(fp, buf, count, f_pos); + mutex_unlock(&dev->lock); + return ret; +} + +static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait) +{ + struct stk_camera *dev = video_drvdata(fp); + __poll_t res = v4l2_ctrl_poll(fp, wait); + + poll_wait(fp, &dev->wait_frame, wait); + + if (!is_present(dev)) + return EPOLLERR; + + if (!list_empty(&dev->sio_full)) + return res | EPOLLIN | EPOLLRDNORM; + + return res; +} + + +static void stk_v4l_vm_open(struct vm_area_struct *vma) +{ + struct stk_sio_buffer *sbuf = vma->vm_private_data; + sbuf->mapcount++; +} +static void stk_v4l_vm_close(struct vm_area_struct *vma) +{ + struct stk_sio_buffer *sbuf = vma->vm_private_data; + sbuf->mapcount--; + if (sbuf->mapcount == 0) + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED; +} +static const struct vm_operations_struct stk_v4l_vm_ops = { + .open = stk_v4l_vm_open, + .close = stk_v4l_vm_close +}; + +static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma) +{ + unsigned int i; + int ret; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + struct stk_camera *dev = video_drvdata(fp); + struct stk_sio_buffer *sbuf = NULL; + + if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + for (i = 0; i < dev->n_sbufs; i++) { + if (dev->sio_bufs[i].v4lbuf.m.offset == offset) { + sbuf = dev->sio_bufs + i; + break; + } + } + if (sbuf == NULL) + return -EINVAL; + ret = remap_vmalloc_range(vma, sbuf->buffer, 0); + if (ret) + return ret; + vma->vm_flags |= VM_DONTEXPAND; + vma->vm_private_data = sbuf; + vma->vm_ops = &stk_v4l_vm_ops; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED; + stk_v4l_vm_open(vma); + return 0; +} + +/* v4l ioctl handlers */ + +static int stk_vidioc_querycap(struct file *filp, + void *priv, struct v4l2_capability *cap) +{ + struct stk_camera *dev = video_drvdata(filp); + + strscpy(cap->driver, "stk", sizeof(cap->driver)); + strscpy(cap->card, "stk", sizeof(cap->card)); + usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); + return 0; +} + +static int stk_vidioc_enum_input(struct file *filp, + void *priv, struct v4l2_input 
*input) +{ + if (input->index != 0) + return -EINVAL; + + strscpy(input->name, "Syntek USB Camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + return 0; +} + + +static int stk_vidioc_g_input(struct file *filp, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int stk_vidioc_s_input(struct file *filp, void *priv, unsigned int i) +{ + return i ? -EINVAL : 0; +} + +static int stk_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct stk_camera *dev = + container_of(ctrl->handler, struct stk_camera, hdl); + + switch (ctrl->id) { + case V4L2_CID_BRIGHTNESS: + return stk_sensor_set_brightness(dev, ctrl->val); + case V4L2_CID_HFLIP: + if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.hflip = !ctrl->val; + else + dev->vsettings.hflip = ctrl->val; + return 0; + case V4L2_CID_VFLIP: + if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.vflip = !ctrl->val; + else + dev->vsettings.vflip = ctrl->val; + return 0; + default: + return -EINVAL; + } + return 0; +} + + +static int stk_vidioc_enum_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_fmtdesc *fmtd) +{ + switch (fmtd->index) { + case 0: + fmtd->pixelformat = V4L2_PIX_FMT_RGB565; + break; + case 1: + fmtd->pixelformat = V4L2_PIX_FMT_RGB565X; + break; + case 2: + fmtd->pixelformat = V4L2_PIX_FMT_UYVY; + break; + case 3: + fmtd->pixelformat = V4L2_PIX_FMT_SBGGR8; + break; + case 4: + fmtd->pixelformat = V4L2_PIX_FMT_YUYV; + break; + default: + return -EINVAL; + } + return 0; +} + +static struct stk_size { + unsigned w; + unsigned h; + enum stk_mode m; +} stk_sizes[] = { + { .w = 1280, .h = 1024, .m = MODE_SXGA, }, + { .w = 640, .h = 480, .m = MODE_VGA, }, + { .w = 352, .h = 288, .m = MODE_CIF, }, + { .w = 320, .h = 240, .m = MODE_QVGA, }, + { .w = 176, .h = 144, .m = MODE_QCIF, }, +}; + +static int stk_vidioc_g_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *f) +{ + struct v4l2_pix_format *pix_format = &f->fmt.pix; + struct stk_camera *dev = video_drvdata(filp); + int i; + + for (i = 0; i < ARRAY_SIZE(stk_sizes) && + stk_sizes[i].m != dev->vsettings.mode; i++) + ; + if (i == ARRAY_SIZE(stk_sizes)) { + pr_err("ERROR: mode invalid\n"); + return -EINVAL; + } + pix_format->width = stk_sizes[i].w; + pix_format->height = stk_sizes[i].h; + pix_format->field = V4L2_FIELD_NONE; + pix_format->colorspace = V4L2_COLORSPACE_SRGB; + pix_format->pixelformat = dev->vsettings.palette; + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8) + pix_format->bytesperline = pix_format->width; + else + pix_format->bytesperline = 2 * pix_format->width; + pix_format->sizeimage = pix_format->bytesperline + * pix_format->height; + return 0; +} + +static int stk_try_fmt_vid_cap(struct file *filp, + struct v4l2_format *fmtd, int *idx) +{ + int i; + switch (fmtd->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_SBGGR8: + break; + default: + return -EINVAL; + } + for (i = 1; i < ARRAY_SIZE(stk_sizes); i++) { + if (fmtd->fmt.pix.width > stk_sizes[i].w) + break; + } + if (i == ARRAY_SIZE(stk_sizes) + || (abs(fmtd->fmt.pix.width - stk_sizes[i-1].w) + < abs(fmtd->fmt.pix.width - stk_sizes[i].w))) { + fmtd->fmt.pix.height = stk_sizes[i-1].h; + fmtd->fmt.pix.width = stk_sizes[i-1].w; + if (idx) + *idx = i - 1; + } else { + fmtd->fmt.pix.height = stk_sizes[i].h; + fmtd->fmt.pix.width = stk_sizes[i].w; + if (idx) + *idx = i; + } + + fmtd->fmt.pix.field = V4L2_FIELD_NONE; + fmtd->fmt.pix.colorspace = 
V4L2_COLORSPACE_SRGB; + if (fmtd->fmt.pix.pixelformat == V4L2_PIX_FMT_SBGGR8) + fmtd->fmt.pix.bytesperline = fmtd->fmt.pix.width; + else + fmtd->fmt.pix.bytesperline = 2 * fmtd->fmt.pix.width; + fmtd->fmt.pix.sizeimage = fmtd->fmt.pix.bytesperline + * fmtd->fmt.pix.height; + return 0; +} + +static int stk_vidioc_try_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *fmtd) +{ + return stk_try_fmt_vid_cap(filp, fmtd, NULL); +} + +static int stk_setup_format(struct stk_camera *dev) +{ + int i = 0; + int depth; + if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8) + depth = 1; + else + depth = 2; + while (i < ARRAY_SIZE(stk_sizes) && + stk_sizes[i].m != dev->vsettings.mode) + i++; + if (i == ARRAY_SIZE(stk_sizes)) { + pr_err("Something is broken in %s\n", __func__); + return -EFAULT; + } + /* This registers controls some timings, not sure of what. */ + stk_camera_write_reg(dev, 0x001b, 0x0e); + if (dev->vsettings.mode == MODE_SXGA) + stk_camera_write_reg(dev, 0x001c, 0x0e); + else + stk_camera_write_reg(dev, 0x001c, 0x46); + /* + * Registers 0x0115 0x0114 are the size of each line (bytes), + * regs 0x0117 0x0116 are the height of the image. + */ + stk_camera_write_reg(dev, 0x0115, + ((stk_sizes[i].w * depth) >> 8) & 0xff); + stk_camera_write_reg(dev, 0x0114, + (stk_sizes[i].w * depth) & 0xff); + stk_camera_write_reg(dev, 0x0117, + (stk_sizes[i].h >> 8) & 0xff); + stk_camera_write_reg(dev, 0x0116, + stk_sizes[i].h & 0xff); + return stk_sensor_configure(dev); +} + +static int stk_vidioc_s_fmt_vid_cap(struct file *filp, + void *priv, struct v4l2_format *fmtd) +{ + int ret; + int idx; + struct stk_camera *dev = video_drvdata(filp); + + if (dev == NULL) + return -ENODEV; + if (!is_present(dev)) + return -ENODEV; + if (is_streaming(dev)) + return -EBUSY; + if (dev->owner) + return -EBUSY; + ret = stk_try_fmt_vid_cap(filp, fmtd, &idx); + if (ret) + return ret; + + dev->vsettings.palette = fmtd->fmt.pix.pixelformat; + stk_free_buffers(dev); + dev->frame_size = fmtd->fmt.pix.sizeimage; + dev->vsettings.mode = stk_sizes[idx].m; + + stk_initialise(dev); + return stk_setup_format(dev); +} + +static int stk_vidioc_reqbufs(struct file *filp, + void *priv, struct v4l2_requestbuffers *rb) +{ + struct stk_camera *dev = video_drvdata(filp); + + if (dev == NULL) + return -ENODEV; + if (rb->memory != V4L2_MEMORY_MMAP) + return -EINVAL; + if (is_streaming(dev) + || (dev->owner && dev->owner != filp)) + return -EBUSY; + stk_free_buffers(dev); + if (rb->count == 0) { + stk_camera_write_reg(dev, 0x0, 0x49); /* turn off the LED */ + unset_initialised(dev); + dev->owner = NULL; + return 0; + } + dev->owner = filp; + + /*FIXME If they ask for zero, we must stop streaming and free */ + if (rb->count < 3) + rb->count = 3; + /* Arbitrary limit */ + else if (rb->count > 5) + rb->count = 5; + + stk_allocate_buffers(dev, rb->count); + rb->count = dev->n_sbufs; + return 0; +} + +static int stk_vidioc_querybuf(struct file *filp, + void *priv, struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + + if (buf->index >= dev->n_sbufs) + return -EINVAL; + sbuf = dev->sio_bufs + buf->index; + *buf = sbuf->v4lbuf; + return 0; +} + +static int stk_vidioc_qbuf(struct file *filp, + void *priv, struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + unsigned long flags; + + if (buf->memory != V4L2_MEMORY_MMAP) + return -EINVAL; + + if (buf->index >= dev->n_sbufs) + return -EINVAL; + sbuf = dev->sio_bufs + buf->index; + if 
(sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) + return 0; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED; + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE; + spin_lock_irqsave(&dev->spinlock, flags); + list_add_tail(&sbuf->list, &dev->sio_avail); + *buf = sbuf->v4lbuf; + spin_unlock_irqrestore(&dev->spinlock, flags); + return 0; +} + +static int stk_vidioc_dqbuf(struct file *filp, + void *priv, struct v4l2_buffer *buf) +{ + struct stk_camera *dev = video_drvdata(filp); + struct stk_sio_buffer *sbuf; + unsigned long flags; + int ret; + + if (!is_streaming(dev)) + return -EINVAL; + + if (filp->f_flags & O_NONBLOCK && list_empty(&dev->sio_full)) + return -EWOULDBLOCK; + ret = wait_event_interruptible(dev->wait_frame, + !list_empty(&dev->sio_full) || !is_present(dev)); + if (ret) + return ret; + if (!is_present(dev)) + return -EIO; + + spin_lock_irqsave(&dev->spinlock, flags); + sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); + list_del_init(&sbuf->list); + spin_unlock_irqrestore(&dev->spinlock, flags); + sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED; + sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE; + sbuf->v4lbuf.sequence = ++dev->sequence; + v4l2_buffer_set_timestamp(&sbuf->v4lbuf, ktime_get_ns()); + + *buf = sbuf->v4lbuf; + return 0; +} + +static int stk_vidioc_streamon(struct file *filp, + void *priv, enum v4l2_buf_type type) +{ + struct stk_camera *dev = video_drvdata(filp); + if (is_streaming(dev)) + return 0; + if (dev->sio_bufs == NULL) + return -EINVAL; + dev->sequence = 0; + return stk_start_stream(dev); +} + +static int stk_vidioc_streamoff(struct file *filp, + void *priv, enum v4l2_buf_type type) +{ + struct stk_camera *dev = video_drvdata(filp); + unsigned long flags; + int i; + stk_stop_stream(dev); + spin_lock_irqsave(&dev->spinlock, flags); + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + for (i = 0; i < dev->n_sbufs; i++) { + INIT_LIST_HEAD(&dev->sio_bufs[i].list); + dev->sio_bufs[i].v4lbuf.flags = 0; + } + spin_unlock_irqrestore(&dev->spinlock, flags); + return 0; +} + + +static int stk_vidioc_g_parm(struct file *filp, + void *priv, struct v4l2_streamparm *sp) +{ + /*FIXME This is not correct */ + sp->parm.capture.timeperframe.numerator = 1; + sp->parm.capture.timeperframe.denominator = 30; + sp->parm.capture.readbuffers = 2; + return 0; +} + +static int stk_vidioc_enum_framesizes(struct file *filp, + void *priv, struct v4l2_frmsizeenum *frms) +{ + if (frms->index >= ARRAY_SIZE(stk_sizes)) + return -EINVAL; + switch (frms->pixel_format) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_SBGGR8: + frms->type = V4L2_FRMSIZE_TYPE_DISCRETE; + frms->discrete.width = stk_sizes[frms->index].w; + frms->discrete.height = stk_sizes[frms->index].h; + return 0; + default: return -EINVAL; + } +} + +static const struct v4l2_ctrl_ops stk_ctrl_ops = { + .s_ctrl = stk_s_ctrl, +}; + +static const struct v4l2_file_operations v4l_stk_fops = { + .owner = THIS_MODULE, + .open = v4l_stk_open, + .release = v4l_stk_release, + .read = v4l_stk_read, + .poll = v4l_stk_poll, + .mmap = v4l_stk_mmap, + .unlocked_ioctl = video_ioctl2, +}; + +static const struct v4l2_ioctl_ops v4l_stk_ioctl_ops = { + .vidioc_querycap = stk_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = stk_vidioc_enum_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = stk_vidioc_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = stk_vidioc_s_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = stk_vidioc_g_fmt_vid_cap, + .vidioc_enum_input = stk_vidioc_enum_input, + 
.vidioc_s_input = stk_vidioc_s_input, + .vidioc_g_input = stk_vidioc_g_input, + .vidioc_reqbufs = stk_vidioc_reqbufs, + .vidioc_querybuf = stk_vidioc_querybuf, + .vidioc_qbuf = stk_vidioc_qbuf, + .vidioc_dqbuf = stk_vidioc_dqbuf, + .vidioc_streamon = stk_vidioc_streamon, + .vidioc_streamoff = stk_vidioc_streamoff, + .vidioc_g_parm = stk_vidioc_g_parm, + .vidioc_enum_framesizes = stk_vidioc_enum_framesizes, + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static void stk_v4l_dev_release(struct video_device *vd) +{ + struct stk_camera *dev = vdev_to_camera(vd); + + if (dev->sio_bufs != NULL || dev->isobufs != NULL) + pr_err("We are leaking memory\n"); + usb_put_intf(dev->interface); + usb_put_dev(dev->udev); + + v4l2_ctrl_handler_free(&dev->hdl); + v4l2_device_unregister(&dev->v4l2_dev); + kfree(dev); +} + +static const struct video_device stk_v4l_data = { + .name = "stkwebcam", + .fops = &v4l_stk_fops, + .ioctl_ops = &v4l_stk_ioctl_ops, + .release = stk_v4l_dev_release, +}; + + +static int stk_register_video_device(struct stk_camera *dev) +{ + int err; + + dev->vdev = stk_v4l_data; + dev->vdev.lock = &dev->lock; + dev->vdev.v4l2_dev = &dev->v4l2_dev; + dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING; + video_set_drvdata(&dev->vdev, dev); + err = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1); + if (err) + pr_err("v4l registration failed\n"); + else + pr_info("Syntek USB2.0 Camera is now controlling device %s\n", + video_device_node_name(&dev->vdev)); + return err; +} + + +/* USB Stuff */ + +static int stk_camera_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct v4l2_ctrl_handler *hdl; + int err = 0; + int i; + + struct stk_camera *dev = NULL; + struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + + dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL); + if (dev == NULL) { + pr_err("Out of memory !\n"); + return -ENOMEM; + } + err = v4l2_device_register(&interface->dev, &dev->v4l2_dev); + if (err < 0) { + dev_err(&udev->dev, "couldn't register v4l2_device\n"); + kfree(dev); + return err; + } + hdl = &dev->hdl; + v4l2_ctrl_handler_init(hdl, 3); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_BRIGHTNESS, 0, 0xff, 0x1, 0x60); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 1); + v4l2_ctrl_new_std(hdl, &stk_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 1); + if (hdl->error) { + err = hdl->error; + dev_err(&udev->dev, "couldn't register control\n"); + goto error; + } + dev->v4l2_dev.ctrl_handler = hdl; + + spin_lock_init(&dev->spinlock); + mutex_init(&dev->lock); + init_waitqueue_head(&dev->wait_frame); + dev->first_init = 1; /* webcam LED management */ + + dev->udev = usb_get_dev(udev); + dev->interface = interface; + usb_get_intf(interface); + + if (hflip != -1) + dev->vsettings.hflip = hflip; + else if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.hflip = 1; + else + dev->vsettings.hflip = 0; + if (vflip != -1) + dev->vsettings.vflip = vflip; + else if (dmi_check_system(stk_upside_down_dmi_table)) + dev->vsettings.vflip = 1; + else + dev->vsettings.vflip = 0; + dev->n_sbufs = 0; + set_present(dev); + + /* Set up the endpoint information + * use only the first isoc-in endpoint + * for the current alternate setting */ + iface_desc = interface->cur_altsetting; + + for (i = 0; i < 
iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (!dev->isoc_ep + && usb_endpoint_is_isoc_in(endpoint)) { + /* we found an isoc in endpoint */ + dev->isoc_ep = usb_endpoint_num(endpoint); + break; + } + } + if (!dev->isoc_ep) { + pr_err("Could not find isoc-in endpoint\n"); + err = -ENODEV; + goto error_put; + } + dev->vsettings.palette = V4L2_PIX_FMT_RGB565; + dev->vsettings.mode = MODE_VGA; + dev->frame_size = 640 * 480 * 2; + + INIT_LIST_HEAD(&dev->sio_avail); + INIT_LIST_HEAD(&dev->sio_full); + + usb_set_intfdata(interface, dev); + + err = stk_register_video_device(dev); + if (err) + goto error_put; + + return 0; + +error_put: + usb_put_intf(interface); + usb_put_dev(dev->udev); +error: + v4l2_ctrl_handler_free(hdl); + v4l2_device_unregister(&dev->v4l2_dev); + kfree(dev); + return err; +} + +static void stk_camera_disconnect(struct usb_interface *interface) +{ + struct stk_camera *dev = usb_get_intfdata(interface); + + usb_set_intfdata(interface, NULL); + unset_present(dev); + + wake_up_interruptible(&dev->wait_frame); + + pr_info("Syntek USB2.0 Camera release resources device %s\n", + video_device_node_name(&dev->vdev)); + + video_unregister_device(&dev->vdev); +} + +#ifdef CONFIG_PM +static int stk_camera_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct stk_camera *dev = usb_get_intfdata(intf); + if (is_streaming(dev)) { + stk_stop_stream(dev); + /* yes, this is ugly */ + set_streaming(dev); + } + return 0; +} + +static int stk_camera_resume(struct usb_interface *intf) +{ + struct stk_camera *dev = usb_get_intfdata(intf); + if (!is_initialised(dev)) + return 0; + unset_initialised(dev); + stk_initialise(dev); + stk_camera_write_reg(dev, 0x0, 0x49); + stk_setup_format(dev); + if (is_streaming(dev)) + stk_start_stream(dev); + return 0; +} +#endif + +static struct usb_driver stk_camera_driver = { + .name = "stkwebcam", + .probe = stk_camera_probe, + .disconnect = stk_camera_disconnect, + .id_table = stkwebcam_table, +#ifdef CONFIG_PM + .suspend = stk_camera_suspend, + .resume = stk_camera_resume, +#endif +}; + +module_usb_driver(stk_camera_driver); diff --git a/drivers/staging/media/stkwebcam/stk-webcam.h b/drivers/staging/media/stkwebcam/stk-webcam.h new file mode 100644 index 000000000000..136decffe9ce --- /dev/null +++ b/drivers/staging/media/stkwebcam/stk-webcam.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * stk-webcam.h : Driver for Syntek 1125 USB webcam controller + * + * Copyright (C) 2006 Nicolas VIVIEN + * Copyright 2007-2008 Jaime Velasco Juan <jsagarribay@gmail.com> + */ + +#ifndef STKWEBCAM_H +#define STKWEBCAM_H + +#include <linux/usb.h> +#include <media/v4l2-device.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-common.h> + +#define DRIVER_VERSION "v0.0.1" +#define DRIVER_VERSION_NUM 0x000001 + +#define MAX_ISO_BUFS 3 +#define ISO_FRAMES_PER_DESC 16 +#define ISO_MAX_FRAME_SIZE 3 * 1024 +#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) + +struct stk_iso_buf { + void *data; + int length; + int read; + struct urb *urb; +}; + +/* Streaming IO buffers */ +struct stk_sio_buffer { + struct v4l2_buffer v4lbuf; + char *buffer; + int mapcount; + struct stk_camera *dev; + struct list_head list; +}; + +enum stk_mode {MODE_VGA, MODE_SXGA, MODE_CIF, MODE_QVGA, MODE_QCIF}; + +struct stk_video { + enum stk_mode mode; + __u32 palette; + int hflip; + int vflip; +}; + +enum stk_status { + S_PRESENT = 1, + S_INITIALISED = 2, + S_MEMALLOCD = 4, + S_STREAMING = 8, +}; +#define 
is_present(dev) ((dev)->status & S_PRESENT) +#define is_initialised(dev) ((dev)->status & S_INITIALISED) +#define is_streaming(dev) ((dev)->status & S_STREAMING) +#define is_memallocd(dev) ((dev)->status & S_MEMALLOCD) +#define set_present(dev) ((dev)->status = S_PRESENT) +#define unset_present(dev) ((dev)->status &= \ + ~(S_PRESENT|S_INITIALISED|S_STREAMING)) +#define set_initialised(dev) ((dev)->status |= S_INITIALISED) +#define unset_initialised(dev) ((dev)->status &= ~S_INITIALISED) +#define set_memallocd(dev) ((dev)->status |= S_MEMALLOCD) +#define unset_memallocd(dev) ((dev)->status &= ~S_MEMALLOCD) +#define set_streaming(dev) ((dev)->status |= S_STREAMING) +#define unset_streaming(dev) ((dev)->status &= ~S_STREAMING) + +struct regval { + unsigned reg; + unsigned val; +}; + +struct stk_camera { + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler hdl; + struct video_device vdev; + struct usb_device *udev; + struct usb_interface *interface; + int webcam_model; + struct file *owner; + struct mutex lock; + int first_init; + + u8 isoc_ep; + + /* Not sure if this is right */ + atomic_t urbs_used; + + struct stk_video vsettings; + + enum stk_status status; + + spinlock_t spinlock; + wait_queue_head_t wait_frame; + + struct stk_iso_buf *isobufs; + + int frame_size; + /* Streaming buffers */ + int reading; + unsigned int n_sbufs; + struct stk_sio_buffer *sio_bufs; + struct list_head sio_avail; + struct list_head sio_full; + unsigned sequence; + + u8 read_reg_scratch; +}; + +#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev) + +int stk_camera_write_reg(struct stk_camera *, u16, u8); +int stk_camera_read_reg(struct stk_camera *, u16, u8 *); + +int stk_sensor_init(struct stk_camera *); +int stk_sensor_configure(struct stk_camera *); +int stk_sensor_sleep(struct stk_camera *dev); +int stk_sensor_wakeup(struct stk_camera *dev); +int stk_sensor_set_brightness(struct stk_camera *dev, int br); + +#endif diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 68b3dcdb5df3..960a0130cd62 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -42,7 +42,7 @@ static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl) if (sps->bit_depth_luma_minus8 != 0) /* Only 8-bit is supported */ return -EINVAL; - } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) { + } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) { const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps; struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl); @@ -164,42 +164,54 @@ static const struct cedrus_control cedrus_controls[] = { }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS, + .id = V4L2_CID_STATELESS_HEVC_SPS, .ops = &cedrus_ctrl_ops, }, .codec = CEDRUS_CODEC_H265, }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS, + .id = V4L2_CID_STATELESS_HEVC_PPS, }, .codec = CEDRUS_CODEC_H265, }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, + .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS, + /* The driver can only handle 1 entry per slice for now */ + .dims = { 1 }, }, .codec = CEDRUS_CODEC_H265, }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX, + .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX, }, .codec = CEDRUS_CODEC_H265, }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE, - .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, - .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, + .id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS, + /* maximum 256 entry point 
offsets per slice */ + .dims = { 256 }, + .max = 0xffffffff, + .step = 1, }, .codec = CEDRUS_CODEC_H265, }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE, - .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE, - .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE, + .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE, + .max = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, + .def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, + }, + .codec = CEDRUS_CODEC_H265, + }, + { + .cfg = { + .id = V4L2_CID_STATELESS_HEVC_START_CODE, + .max = V4L2_STATELESS_HEVC_START_CODE_NONE, + .def = V4L2_STATELESS_HEVC_START_CODE_NONE, }, .codec = CEDRUS_CODEC_H265, }, @@ -211,7 +223,7 @@ static const struct cedrus_control cedrus_controls[] = { }, { .cfg = { - .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS, + .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS, }, .codec = CEDRUS_CODEC_H265, }, @@ -230,6 +242,17 @@ void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id) return NULL; } +u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id) +{ + unsigned int i; + + for (i = 0; ctx->ctrls[i]; i++) + if (ctx->ctrls[i]->id == id) + return ctx->ctrls[i]->elems; + + return 0; +} + static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx) { struct v4l2_ctrl_handler *hdl = &ctx->hdl; @@ -240,7 +263,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx) v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT); if (hdl->error) { v4l2_err(&dev->v4l2_dev, - "Failed to initialize control handler\n"); + "Failed to initialize control handler: %d\n", + hdl->error); return hdl->error; } @@ -255,7 +279,9 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx) NULL); if (hdl->error) { v4l2_err(&dev->v4l2_dev, - "Failed to create new custom control\n"); + "Failed to create %s control: %d\n", + v4l2_ctrl_get_name(cedrus_controls[i].cfg.id), + hdl->error); v4l2_ctrl_handler_free(hdl); kfree(ctx->ctrls); diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h index 3bc094eb497f..084193019350 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.h +++ b/drivers/staging/media/sunxi/cedrus/cedrus.h @@ -81,6 +81,8 @@ struct cedrus_h265_run { const struct v4l2_ctrl_hevc_slice_params *slice_params; const struct v4l2_ctrl_hevc_decode_params *decode_params; const struct v4l2_ctrl_hevc_scaling_matrix *scaling_matrix; + const u32 *entry_points; + u32 entry_points_count; }; struct cedrus_vp8_run { @@ -146,6 +148,8 @@ struct cedrus_ctx { ssize_t mv_col_buf_unit_size; void *neighbor_info_buf; dma_addr_t neighbor_info_buf_addr; + void *entry_points_buf; + dma_addr_t entry_points_buf_addr; } h265; struct { unsigned int last_frame_p_type; @@ -162,7 +166,7 @@ struct cedrus_dec_ops { void (*irq_clear)(struct cedrus_ctx *ctx); void (*irq_disable)(struct cedrus_ctx *ctx); enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx); - void (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run); + int (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run); int (*start)(struct cedrus_ctx *ctx); void (*stop)(struct cedrus_ctx *ctx); void (*trigger)(struct cedrus_ctx *ctx); @@ -261,5 +265,6 @@ vb2_to_cedrus_buffer(const struct vb2_buffer *p) } void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id); +u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id); #endif diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c index 9c7200299465..3b6aa78a2985 100644 --- 
a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c @@ -28,6 +28,7 @@ void cedrus_device_run(void *priv) struct cedrus_dev *dev = ctx->dev; struct cedrus_run run = {}; struct media_request *src_req; + int error; run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); @@ -65,15 +66,19 @@ void cedrus_device_run(void *priv) case V4L2_PIX_FMT_HEVC_SLICE: run.h265.sps = cedrus_find_control_data(ctx, - V4L2_CID_MPEG_VIDEO_HEVC_SPS); + V4L2_CID_STATELESS_HEVC_SPS); run.h265.pps = cedrus_find_control_data(ctx, - V4L2_CID_MPEG_VIDEO_HEVC_PPS); + V4L2_CID_STATELESS_HEVC_PPS); run.h265.slice_params = cedrus_find_control_data(ctx, - V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS); + V4L2_CID_STATELESS_HEVC_SLICE_PARAMS); run.h265.decode_params = cedrus_find_control_data(ctx, - V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS); + V4L2_CID_STATELESS_HEVC_DECODE_PARAMS); run.h265.scaling_matrix = cedrus_find_control_data(ctx, - V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX); + V4L2_CID_STATELESS_HEVC_SCALING_MATRIX); + run.h265.entry_points = cedrus_find_control_data(ctx, + V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS); + run.h265.entry_points_count = cedrus_get_num_of_controls(ctx, + V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS); break; case V4L2_PIX_FMT_VP8_FRAME: @@ -89,16 +94,26 @@ void cedrus_device_run(void *priv) cedrus_dst_format_set(dev, &ctx->dst_fmt); - dev->dec_ops[ctx->current_codec]->setup(ctx, &run); + error = dev->dec_ops[ctx->current_codec]->setup(ctx, &run); + if (error) + v4l2_err(&ctx->dev->v4l2_dev, + "Failed to setup decoding job: %d\n", error); /* Complete request(s) controls if needed. */ if (src_req) v4l2_ctrl_request_complete(src_req, &ctx->hdl); - dev->dec_ops[ctx->current_codec]->trigger(ctx); - - /* Start the watchdog timer. */ - schedule_delayed_work(&dev->watchdog_work, - msecs_to_jiffies(2000)); + /* Trigger decoding if setup went well, bail out otherwise. */ + if (!error) { + dev->dec_ops[ctx->current_codec]->trigger(ctx); + + /* Start the watchdog timer. 
*/ + schedule_delayed_work(&dev->watchdog_work, + msecs_to_jiffies(2000)); + } else { + v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, + ctx->fh.m2m_ctx, + VB2_BUF_STATE_ERROR); + } } diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index d8fb93035470..c345e67ba9bc 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -493,8 +493,7 @@ static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx) reg & ~VE_H264_CTRL_INT_MASK); } -static void cedrus_h264_setup(struct cedrus_ctx *ctx, - struct cedrus_run *run) +static int cedrus_h264_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) { struct cedrus_dev *dev = ctx->dev; @@ -510,6 +509,8 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx, cedrus_write_frame_list(ctx, run); cedrus_set_params(ctx, run); + + return 0; } static int cedrus_h264_start(struct cedrus_ctx *ctx) diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index 44f385be9f6c..687f87598f78 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -143,10 +143,13 @@ static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx, for (i = 0; i < num_active_dpb_entries; i++) { int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0); u32 pic_order_cnt[2] = { - dpb[i].pic_order_cnt[0], - dpb[i].pic_order_cnt[1] + dpb[i].pic_order_cnt_val, + dpb[i].pic_order_cnt_val }; + if (buffer_index < 0) + continue; + cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic, pic_order_cnt, buffer_index); @@ -301,8 +304,91 @@ static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx, } } -static void cedrus_h265_setup(struct cedrus_ctx *ctx, - struct cedrus_run *run) +static int cedrus_h265_is_low_delay(struct cedrus_run *run) +{ + const struct v4l2_ctrl_hevc_slice_params *slice_params; + const struct v4l2_hevc_dpb_entry *dpb; + s32 poc; + int i; + + slice_params = run->h265.slice_params; + poc = run->h265.decode_params->pic_order_cnt_val; + dpb = run->h265.decode_params->dpb; + + for (i = 0; i < slice_params->num_ref_idx_l0_active_minus1 + 1; i++) + if (dpb[slice_params->ref_idx_l0[i]].pic_order_cnt_val > poc) + return 1; + + if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_B) + return 0; + + for (i = 0; i < slice_params->num_ref_idx_l1_active_minus1 + 1; i++) + if (dpb[slice_params->ref_idx_l1[i]].pic_order_cnt_val > poc) + return 1; + + return 0; +} + +static void cedrus_h265_write_tiles(struct cedrus_ctx *ctx, + struct cedrus_run *run, + unsigned int ctb_addr_x, + unsigned int ctb_addr_y) +{ + const struct v4l2_ctrl_hevc_slice_params *slice_params; + const struct v4l2_ctrl_hevc_pps *pps; + struct cedrus_dev *dev = ctx->dev; + const u32 *entry_points; + u32 *entry_points_buf; + int i, x, tx, y, ty; + + pps = run->h265.pps; + slice_params = run->h265.slice_params; + entry_points = run->h265.entry_points; + entry_points_buf = ctx->codec.h265.entry_points_buf; + + for (x = 0, tx = 0; tx < pps->num_tile_columns_minus1 + 1; tx++) { + if (x + pps->column_width_minus1[tx] + 1 > ctb_addr_x) + break; + + x += pps->column_width_minus1[tx] + 1; + } + + for (y = 0, ty = 0; ty < pps->num_tile_rows_minus1 + 1; ty++) { + if (y + pps->row_height_minus1[ty] + 1 > ctb_addr_y) + break; + + y += pps->row_height_minus1[ty] + 1; + } + + cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, (y << 16) | (x << 0)); + cedrus_write(dev, 
VE_DEC_H265_TILE_END_CTB, + ((y + pps->row_height_minus1[ty]) << 16) | + ((x + pps->column_width_minus1[tx]) << 0)); + + if (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED) { + for (i = 0; i < slice_params->num_entry_point_offsets; i++) + entry_points_buf[i] = entry_points[i]; + } else { + for (i = 0; i < slice_params->num_entry_point_offsets; i++) { + if (tx + 1 >= pps->num_tile_columns_minus1 + 1) { + x = 0; + tx = 0; + y += pps->row_height_minus1[ty++] + 1; + } else { + x += pps->column_width_minus1[tx++] + 1; + } + + entry_points_buf[i * 4 + 0] = entry_points[i]; + entry_points_buf[i * 4 + 1] = 0x0; + entry_points_buf[i * 4 + 2] = (y << 16) | (x << 0); + entry_points_buf[i * 4 + 3] = + ((y + pps->row_height_minus1[ty]) << 16) | + ((x + pps->column_width_minus1[tx]) << 0); + } + } +} + +static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) { struct cedrus_dev *dev = ctx->dev; const struct v4l2_ctrl_hevc_sps *sps; @@ -312,11 +398,15 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, const struct v4l2_hevc_pred_weight_table *pred_weight_table; unsigned int width_in_ctb_luma, ctb_size_luma; unsigned int log2_max_luma_coding_block_size; + unsigned int ctb_addr_x, ctb_addr_y; dma_addr_t src_buf_addr; dma_addr_t src_buf_end_addr; u32 chroma_log2_weight_denom; + u32 num_entry_point_offsets; u32 output_pic_list_index; u32 pic_order_cnt[2]; + u8 *padding; + int count; u32 reg; sps = run->h265.sps; @@ -324,6 +414,15 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, slice_params = run->h265.slice_params; decode_params = run->h265.decode_params; pred_weight_table = &slice_params->pred_weight_table; + num_entry_point_offsets = slice_params->num_entry_point_offsets; + + /* + * If entry points offsets are present, we should get them + * exactly the right amount. + */ + if (num_entry_point_offsets && + num_entry_point_offsets != run->h265.entry_points_count) + return -ERANGE; log2_max_luma_coding_block_size = sps->log2_min_luma_coding_block_size_minus3 + 3 + @@ -358,8 +457,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING); if (!ctx->codec.h265.mv_col_buf) { ctx->codec.h265.mv_col_buf_size = 0; - // TODO: Abort the process here. - return; + return -ENOMEM; } } @@ -391,12 +489,19 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg); /* Coding tree block address */ - reg = VE_DEC_H265_DEC_CTB_ADDR_X(slice_params->slice_segment_addr % width_in_ctb_luma); - reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(slice_params->slice_segment_addr / width_in_ctb_luma); + ctb_addr_x = slice_params->slice_segment_addr % width_in_ctb_luma; + ctb_addr_y = slice_params->slice_segment_addr / width_in_ctb_luma; + reg = VE_DEC_H265_DEC_CTB_ADDR_X(ctb_addr_x); + reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(ctb_addr_y); cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg); - cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0); - cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0); + if ((pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED) || + (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED)) { + cedrus_h265_write_tiles(ctx, run, ctb_addr_x, ctb_addr_y); + } else { + cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0); + cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0); + } /* Clear the number of correctly-decoded coding tree blocks. */ if (ctx->fh.m2m_ctx->new_frame) @@ -405,7 +510,30 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, /* Initialize bitstream access. 
*/ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC); - cedrus_h265_skip_bits(dev, slice_params->data_bit_offset); + /* + * Cedrus expects that bitstream pointer is actually at the end of the slice header + * instead of start of slice data. Padding is 8 bits at most (one bit set to 1 and + * at most seven bits set to 0), so we have to inspect only one byte before slice data. + */ + + if (slice_params->data_byte_offset == 0) + return -EOPNOTSUPP; + + padding = (u8 *)vb2_plane_vaddr(&run->src->vb2_buf, 0) + + slice_params->data_byte_offset - 1; + + /* at least one bit must be set in that byte */ + if (*padding == 0) + return -EINVAL; + + for (count = 0; count < 8; count++) + if (*padding & (1 << count)) + break; + + /* Include the one bit. */ + count++; + + cedrus_h265_skip_bits(dev, slice_params->data_byte_offset * 8 - count); /* Bitstream parameters. */ @@ -500,7 +628,9 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED, pps->flags); - /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */ + reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED, + V4L2_HEVC_PPS_FLAG_TILES_ENABLED, + pps->flags); reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED, V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED, @@ -559,7 +689,6 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) | VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) | - VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(decode_params->num_poc_st_curr_after == 0) | VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) | VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) | VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta); @@ -572,16 +701,22 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED, slice_params->flags); + if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I && !cedrus_h265_is_low_delay(run)) + reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY; + cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg); chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom + pred_weight_table->delta_chroma_log2_weight_denom; - reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) | + reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(num_entry_point_offsets) | VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) | VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom); cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg); + cedrus_write(dev, VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR, + ctx->codec.h265.entry_points_buf_addr >> 8); + /* Decoded picture size. */ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) | @@ -659,6 +794,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx, /* Enable appropriate interruptions. 
*/ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK); + + return 0; } static int cedrus_h265_start(struct cedrus_ctx *ctx) @@ -676,6 +813,18 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx) if (!ctx->codec.h265.neighbor_info_buf) return -ENOMEM; + ctx->codec.h265.entry_points_buf = + dma_alloc_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE, + &ctx->codec.h265.entry_points_buf_addr, + GFP_KERNEL); + if (!ctx->codec.h265.entry_points_buf) { + dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE, + ctx->codec.h265.neighbor_info_buf, + ctx->codec.h265.neighbor_info_buf_addr, + DMA_ATTR_NO_KERNEL_MAPPING); + return -ENOMEM; + } + return 0; } @@ -696,6 +845,9 @@ static void cedrus_h265_stop(struct cedrus_ctx *ctx) ctx->codec.h265.neighbor_info_buf, ctx->codec.h265.neighbor_info_buf_addr, DMA_ATTR_NO_KERNEL_MAPPING); + dma_free_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE, + ctx->codec.h265.entry_points_buf, + ctx->codec.h265.entry_points_buf_addr); } static void cedrus_h265_trigger(struct cedrus_ctx *ctx) diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c index 5dad2f296c6d..4cfc4a3c8a7f 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c @@ -48,7 +48,7 @@ static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx) cedrus_write(dev, VE_DEC_MPEG_CTRL, reg); } -static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) +static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) { const struct v4l2_ctrl_mpeg2_sequence *seq; const struct v4l2_ctrl_mpeg2_picture *pic; @@ -185,6 +185,8 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) VE_DEC_MPEG_CTRL_MC_CACHE_EN; cedrus_write(dev, VE_DEC_MPEG_CTRL, reg); + + return 0; } static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx) diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h index bdb062ad8682..d81f7513ade0 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h +++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h @@ -377,13 +377,12 @@ #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23) #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22) +#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY BIT(21) #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \ SHIFT_AND_MASK_BITS(v, 31, 28) #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \ SHIFT_AND_MASK_BITS(v, 27, 24) -#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \ - ((v) ? 
BIT(21) : 0) #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \ SHIFT_AND_MASK_BITS(v, 20, 16) #define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \ diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c index 33726175d980..66714609b577 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c @@ -568,7 +568,6 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq, src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_DMABUF; - src_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct cedrus_buffer); src_vq->ops = &cedrus_qops; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c index f4016684b32d..3f750d1795b6 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c @@ -651,8 +651,7 @@ static void cedrus_vp8_irq_disable(struct cedrus_ctx *ctx) reg & ~VE_H264_CTRL_INT_MASK); } -static void cedrus_vp8_setup(struct cedrus_ctx *ctx, - struct cedrus_run *run) +static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) { const struct v4l2_ctrl_vp8_frame *slice = run->vp8.frame_params; struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; @@ -855,6 +854,8 @@ static void cedrus_vp8_setup(struct cedrus_ctx *ctx, ctx->codec.vp8.last_sharpness_level = slice->lf.sharpness_level; } + + return 0; } static int cedrus_vp8_start(struct cedrus_ctx *ctx) diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c index 8e184aa4c252..9d46a36cc014 100644 --- a/drivers/staging/media/tegra-video/vi.c +++ b/drivers/staging/media/tegra-video/vi.c @@ -157,7 +157,7 @@ tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan) { struct media_pad *pad; - pad = media_entity_remote_pad(&chan->pad); + pad = media_pad_remote_pad_first(&chan->pad); if (!pad) return NULL; @@ -177,7 +177,7 @@ tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan) pad = &subdev->entity.pads[0]; while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) { - pad = media_entity_remote_pad(pad); + pad = media_pad_remote_pad_first(pad); if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) break; entity = pad->entity; diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c index 3af7d02bd910..a0c8bde5ec11 100644 --- a/drivers/staging/media/zoran/videocodec.c +++ b/drivers/staging/media/zoran/videocodec.c @@ -16,16 +16,6 @@ #include "videocodec.h" -static int videocodec_debug; -module_param(videocodec_debug, int, 0); -MODULE_PARM_DESC(videocodec_debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) 
\ - do { \ - if (videocodec_debug >= num) \ - printk(format, ##args); \ - } while (0) - struct attached_list { struct videocodec *codec; struct attached_list *next; @@ -47,6 +37,7 @@ static struct codec_list *codeclist_top; struct videocodec *videocodec_attach(struct videocodec_master *master) { struct codec_list *h = codeclist_top; + struct zoran *zr; struct attached_list *a, *ptr; struct videocodec *codec; int res; @@ -56,11 +47,13 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) return NULL; } - dprintk(2, "%s: '%s', flags %lx, magic %lx\n", __func__, - master->name, master->flags, master->magic); + zr = videocodec_master_to_zoran(master); + + zrdev_dbg(zr, "%s: '%s', flags %lx, magic %lx\n", __func__, + master->name, master->flags, master->magic); if (!h) { - pr_err("%s: no device available\n", __func__); + zrdev_err(zr, "%s: no device available\n", __func__); return NULL; } @@ -68,7 +61,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) // attach only if the slave has at least the flags // expected by the master if ((master->flags & h->codec->flags) == master->flags) { - dprintk(4, "%s: try '%s'\n", __func__, h->codec->name); + zrdev_dbg(zr, "%s: try '%s'\n", __func__, h->codec->name); codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL); if (!codec) @@ -79,7 +72,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) codec->master_data = master; res = codec->setup(codec); if (res == 0) { - dprintk(3, "%s: '%s'\n", __func__, codec->name); + zrdev_dbg(zr, "%s: '%s'\n", __func__, codec->name); ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); if (!ptr) goto out_kfree; @@ -88,12 +81,13 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) a = h->list; if (!a) { h->list = ptr; - dprintk(4, "videocodec: first element\n"); + zrdev_dbg(zr, "videocodec: first element\n"); } else { while (a->next) a = a->next; // find end a->next = ptr; - dprintk(4, "videocodec: in after '%s'\n", h->codec->name); + zrdev_dbg(zr, "videocodec: in after '%s'\n", + h->codec->name); } h->attached += 1; @@ -105,7 +99,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) h = h->next; } - pr_err("%s: no codec found!\n", __func__); + zrdev_err(zr, "%s: no codec found!\n", __func__); return NULL; out_kfree: @@ -116,6 +110,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master) int videocodec_detach(struct videocodec *codec) { struct codec_list *h = codeclist_top; + struct zoran *zr; struct attached_list *a, *prev; int res; @@ -124,11 +119,13 @@ int videocodec_detach(struct videocodec *codec) return -EINVAL; } - dprintk(2, "%s: '%s', type: %x, flags %lx, magic %lx\n", __func__, - codec->name, codec->type, codec->flags, codec->magic); + zr = videocodec_to_zoran(codec); + + zrdev_dbg(zr, "%s: '%s', type: %x, flags %lx, magic %lx\n", __func__, + codec->name, codec->type, codec->flags, codec->magic); if (!h) { - pr_err("%s: no device left...\n", __func__); + zrdev_err(zr, "%s: no device left...\n", __func__); return -ENXIO; } @@ -139,18 +136,19 @@ int videocodec_detach(struct videocodec *codec) if (codec == a->codec) { res = a->codec->unset(a->codec); if (res >= 0) { - dprintk(3, "%s: '%s'\n", __func__, a->codec->name); + zrdev_dbg(zr, "%s: '%s'\n", __func__, + a->codec->name); a->codec->master_data = NULL; } else { - pr_err("%s: '%s'\n", __func__, a->codec->name); + zrdev_err(zr, "%s: '%s'\n", __func__, a->codec->name); a->codec->master_data = NULL; } if (!prev) { h->list = a->next; - 
dprintk(4, "videocodec: delete first\n"); + zrdev_dbg(zr, "videocodec: delete first\n"); } else { prev->next = a->next; - dprintk(4, "videocodec: delete middle\n"); + zrdev_dbg(zr, "videocodec: delete middle\n"); } kfree(a->codec); kfree(a); @@ -163,22 +161,25 @@ int videocodec_detach(struct videocodec *codec) h = h->next; } - pr_err("%s: given codec not found!\n", __func__); + zrdev_err(zr, "%s: given codec not found!\n", __func__); return -EINVAL; } int videocodec_register(const struct videocodec *codec) { struct codec_list *ptr, *h = codeclist_top; + struct zoran *zr; if (!codec) { pr_err("%s: no data!\n", __func__); return -EINVAL; } - dprintk(2, - "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", - codec->name, codec->type, codec->flags, codec->magic); + zr = videocodec_to_zoran((struct videocodec *)codec); + + zrdev_dbg(zr, + "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", + codec->name, codec->type, codec->flags, codec->magic); ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); if (!ptr) @@ -187,13 +188,13 @@ int videocodec_register(const struct videocodec *codec) if (!h) { codeclist_top = ptr; - dprintk(4, "videocodec: hooked in as first element\n"); + zrdev_dbg(zr, "videocodec: hooked in as first element\n"); } else { while (h->next) h = h->next; // find the end h->next = ptr; - dprintk(4, "videocodec: hooked in after '%s'\n", - h->codec->name); + zrdev_dbg(zr, "videocodec: hooked in after '%s'\n", + h->codec->name); } return 0; @@ -202,37 +203,41 @@ int videocodec_register(const struct videocodec *codec) int videocodec_unregister(const struct videocodec *codec) { struct codec_list *prev = NULL, *h = codeclist_top; + struct zoran *zr; if (!codec) { pr_err("%s: no data!\n", __func__); return -EINVAL; } - dprintk(2, - "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", - codec->name, codec->type, codec->flags, codec->magic); + zr = videocodec_to_zoran((struct videocodec *)codec); + + zrdev_dbg(zr, + "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", + codec->name, codec->type, codec->flags, codec->magic); if (!h) { - pr_err("%s: no device left...\n", __func__); + zrdev_err(zr, "%s: no device left...\n", __func__); return -ENXIO; } while (h) { if (codec == h->codec) { if (h->attached) { - pr_err("videocodec: '%s' is used\n", h->codec->name); + zrdev_err(zr, "videocodec: '%s' is used\n", + h->codec->name); return -EBUSY; } - dprintk(3, "videocodec: unregister '%s' is ok.\n", - h->codec->name); + zrdev_dbg(zr, "videocodec: unregister '%s' is ok.\n", + h->codec->name); if (!prev) { codeclist_top = h->next; - dprintk(4, - "videocodec: delete first element\n"); + zrdev_dbg(zr, + "videocodec: delete first element\n"); } else { prev->next = h->next; - dprintk(4, - "videocodec: delete middle element\n"); + zrdev_dbg(zr, + "videocodec: delete middle element\n"); } kfree(h); return 0; @@ -241,7 +246,7 @@ int videocodec_unregister(const struct videocodec *codec) h = h->next; } - pr_err("%s: given codec not found!\n", __func__); + zrdev_err(zr, "%s: given codec not found!\n", __func__); return -EINVAL; } diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h index 9dea348fee40..5e6057edd339 100644 --- a/drivers/staging/media/zoran/videocodec.h +++ b/drivers/staging/media/zoran/videocodec.h @@ -307,4 +307,19 @@ extern int videocodec_unregister(const struct videocodec *); int videocodec_debugfs_show(struct seq_file *m); +#include "zoran.h" +static inline struct zoran *videocodec_master_to_zoran(struct 
videocodec_master *master) +{ + struct zoran *zr = master->data; + + return zr; +} + +static inline struct zoran *videocodec_to_zoran(struct videocodec *codec) +{ + struct videocodec_master *master = codec->master_data; + + return videocodec_master_to_zoran(master); +} + #endif /*ifndef __LINUX_VIDEOCODEC_H */ diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h index 654c95fa5aba..05227e5298f6 100644 --- a/drivers/staging/media/zoran/zoran.h +++ b/drivers/staging/media/zoran/zoran.h @@ -19,6 +19,8 @@ #define _BUZ_H_ #include <linux/debugfs.h> +#include <linux/pci.h> +#include <linux/i2c-algo-bit.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/videobuf2-core.h> @@ -301,6 +303,18 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) #endif +/* + * Debugging macros + */ +#define zrdev_dbg(zr, format, args...) \ + pci_dbg((zr)->pci_dev, format, ##args) \ + +#define zrdev_err(zr, format, args...) \ + pci_err((zr)->pci_dev, format, ##args) \ + +#define zrdev_info(zr, format, args...) \ + pci_info((zr)->pci_dev, format, ##args) \ + int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir); void zoran_queue_exit(struct zoran *zr); int zr_set_buf(struct zoran *zr); diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c index 26c7c32b6bc0..0e0532537a3e 100644 --- a/drivers/staging/media/zoran/zr36016.c +++ b/drivers/staging/media/zoran/zr36016.c @@ -22,17 +22,6 @@ /* amount of chips attached via this driver */ static int zr36016_codecs; -static int zr36016_debug; -module_param(zr36016_debug, int, 0); -MODULE_PARM_DESC(zr36016_debug, "Debug level (0-4)"); - - -#define dprintk(num, format, args...) \ - do { \ - if (zr36016_debug >= num) \ - printk(format, ##args); \ - } while (0) - /* ========================================================================= Local hardware I/O functions: @@ -43,27 +32,30 @@ MODULE_PARM_DESC(zr36016_debug, "Debug level (0-4)"); static u8 zr36016_read(struct zr36016 *ptr, u16 reg) { u8 value = 0; + struct zoran *zr = videocodec_to_zoran(ptr->codec); /* just in case something is wrong... */ if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF; else - pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name); - dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); + zrdev_dbg(zr, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value) { - dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); + struct zoran *zr = videocodec_to_zoran(ptr->codec); + + zrdev_dbg(zr, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else - pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* indirect read and write functions */ @@ -72,30 +64,34 @@ static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value) static u8 zr36016_readi(struct zr36016 *ptr, u16 reg) { u8 value = 0; + struct zoran *zr = videocodec_to_zoran(ptr->codec); /* just in case something is wrong... 
*/ if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA } else { - pr_err("%s: invalid I/O setup, nothing read (i)!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing read (i)!\n", ptr->name); } - dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value); + zrdev_dbg(zr, "%s: reading indirect from 0x%04x: %02x\n", + ptr->name, reg, value); return value; } static void zr36016_writei(struct zr36016 *ptr, u16 reg, u8 value) { - dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, - value, reg); + struct zoran *zr = videocodec_to_zoran(ptr->codec); + + zrdev_dbg(zr, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, + value, reg); /* just in case something is wrong... */ if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA } else { - pr_err("%s: invalid I/O setup, nothing written (i)!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing written (i)!\n", ptr->name); } } @@ -120,32 +116,34 @@ static u8 zr36016_read_version(struct zr36016 *ptr) static int zr36016_basic_test(struct zr36016 *ptr) { - if (zr36016_debug) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); + + if (*KERN_INFO <= CONSOLE_LOGLEVEL_DEFAULT) { int i; zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); - dprintk(1, KERN_INFO "%s: registers: ", ptr->name); + zrdev_dbg(zr, "%s: registers: ", ptr->name); for (i = 0; i <= 0x0b; i++) - dprintk(1, "%02x ", zr36016_readi(ptr, i)); - dprintk(1, "\n"); + zrdev_dbg(zr, "%02x ", zr36016_readi(ptr, i)); + zrdev_dbg(zr, "\n"); } // for testing just write 0, then the default value to a register and read // it back in both cases zr36016_writei(ptr, ZR016I_PAX_LO, 0x00); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { - pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name); + zrdev_err(zr, "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { - pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name); + zrdev_err(zr, "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } // we allow version numbers from 0-3, should be enough, though zr36016_read_version(ptr); if (ptr->version & 0x0c) { - pr_err("%s: attach failed, suspicious version %d found...\n", ptr->name, - ptr->version); + zrdev_err(zr, "%s: attach failed, suspicious version %d found...\n", ptr->name, + ptr->version); return -ENXIO; } @@ -164,10 +162,11 @@ static int zr36016_pushit(struct zr36016 *ptr, u16 len, const char *data) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); int i = 0; - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", - ptr->name, startreg, len); + zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n", + ptr->name, startreg, len); while (i < len) { zr36016_writei(ptr, startreg++, data[i++]); } @@ -225,8 +224,9 @@ static void zr36016_init(struct zr36016 *ptr) static int zr36016_set_mode(struct videocodec *codec, int mode) { struct zr36016 *ptr = (struct zr36016 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + zrdev_dbg(zr, "%s: set_mode %d call\n", 
ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; @@ -242,11 +242,12 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36016 *ptr = (struct zr36016 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); - dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", - ptr->name, norm->h_start, norm->v_start, - cap->x, cap->y, cap->width, cap->height, - cap->decimation); + zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", + ptr->name, norm->h_start, norm->v_start, + cap->x, cap->y, cap->width, cap->height, + cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so @@ -276,9 +277,11 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm static int zr36016_control(struct videocodec *codec, int type, int size, void *data) { struct zr36016 *ptr = (struct zr36016 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); int *ival = (int *)data; - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); + zrdev_dbg(zr, "%s: control %d call with %d byte\n", + ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status - we don't know it ... */ @@ -325,11 +328,12 @@ static int zr36016_control(struct videocodec *codec, int type, int size, void *d static int zr36016_unset(struct videocodec *codec) { struct zr36016 *ptr = codec->data; + struct zoran *zr = videocodec_to_zoran(codec); if (ptr) { /* do wee need some codec deinit here, too ???? */ - dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); + zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; @@ -352,12 +356,13 @@ static int zr36016_unset(struct videocodec *codec) static int zr36016_setup(struct videocodec *codec) { struct zr36016 *ptr; + struct zoran *zr = videocodec_to_zoran(codec); int res; - dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs); + zrdev_dbg(zr, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs); if (zr36016_codecs == MAX_CODECS) { - pr_err("zr36016: Can't attach more codecs!\n"); + zrdev_err(zr, "zr36016: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init @@ -384,7 +389,8 @@ static int zr36016_setup(struct videocodec *codec) ptr->ydec = 0; zr36016_init(ptr); - dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version); + zrdev_dbg(zr, "%s: codec v%d attached and running\n", + ptr->name, ptr->version); return 0; } @@ -417,9 +423,8 @@ int zr36016_init_module(void) void zr36016_cleanup_module(void) { if (zr36016_codecs) { - dprintk(1, - "zr36016: something's wrong - %d codecs left somehow.\n", - zr36016_codecs); + pr_debug("zr36016: something's wrong - %d codecs left somehow.\n", + zr36016_codecs); } videocodec_unregister(&zr36016_codec); } diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c index 38f7021e7b06..6a7ef28d996c 100644 --- a/drivers/staging/media/zoran/zr36050.c +++ b/drivers/staging/media/zoran/zr36050.c @@ -29,17 +29,6 @@ /* amount of chips attached via this driver */ static int zr36050_codecs; -/* debugging is available via module parameter */ -static int zr36050_debug; -module_param(zr36050_debug, int, 0); -MODULE_PARM_DESC(zr36050_debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) 
\ - do { \ - if (zr36050_debug >= num) \ - printk(format, ##args); \ - } while (0) - /* ========================================================================= Local hardware I/O functions: @@ -49,32 +38,32 @@ MODULE_PARM_DESC(zr36050_debug, "Debug level (0-4)"); /* read and write functions */ static u8 zr36050_read(struct zr36050 *ptr, u16 reg) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); u8 value = 0; /* just in case something is wrong... */ if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF; else - dprintk(1, - KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name); - dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); + zrdev_dbg(zr, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36050_write(struct zr36050 *ptr, u16 reg, u8 value) { - dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); + struct zoran *zr = videocodec_to_zoran(ptr->codec); + + zrdev_dbg(zr, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); /* just in case something is wrong... */ if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing written!\n", - ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", + ptr->name); } /* ========================================================================= @@ -117,14 +106,15 @@ static u16 zr36050_read_scalefactor(struct zr36050 *ptr) static void zr36050_wait_end(struct zr36050 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); int i = 0; while (!(zr36050_read_status1(ptr) & 0x4)) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! 
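The loop in zr36050_wait_end() above is a plain busy wait: read status 1, udelay(1), give up after 200000 iterations (about 200 ms). The patch only converts the logging and leaves that loop untouched; for comparison, a roughly equivalent formulation using the generic poll helper from <linux/iopoll.h> could look like the sketch below. zr36050_wait_end_poll() is an invented name, and the atomic variant is chosen purely because it mirrors the existing udelay()-based wait; everything else (zr36050_read_status1(), zrdev_err(), videocodec_to_zoran()) comes from the surrounding driver.

/* Hypothetical alternative, not part of this series. */
#include <linux/iopoll.h>

static int zr36050_wait_end_poll(struct zr36050 *ptr)
{
	struct zoran *zr = videocodec_to_zoran(ptr->codec);
	u8 status;
	int ret;

	/* Poll the END flag (bit 2 of status 1) every 1 us, give up after 200 ms. */
	ret = read_poll_timeout_atomic(zr36050_read_status1, status,
				       status & 0x4, 1, 200000, false, ptr);
	if (ret)	/* 0 on success, -ETIMEDOUT otherwise */
		zrdev_err(zr, "%s: timeout at wait_end (last status: 0x%02x)\n",
			  ptr->name, status);
	return ret;
}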
- dprintk(1, - "%s: timeout at wait_end (last status: 0x%02x)\n", - ptr->name, ptr->status1); + zrdev_err(zr, + "%s: timeout at wait_end (last status: 0x%02x)\n", + ptr->name, ptr->status1); break; } } @@ -138,33 +128,32 @@ static void zr36050_wait_end(struct zr36050 *ptr) static int zr36050_basic_test(struct zr36050 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); + zr36050_write(ptr, ZR050_SOF_IDX, 0x00); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00); if ((zr36050_read(ptr, ZR050_SOF_IDX) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to jpeg processor!\n", - ptr->name); + zrdev_err(zr, + "%s: attach failed, can't connect to jpeg processor!\n", + ptr->name); return -ENXIO; } zr36050_write(ptr, ZR050_SOF_IDX, 0xff); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0); if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to jpeg processor!\n", - ptr->name); + zrdev_err(zr, + "%s: attach failed, can't connect to jpeg processor!\n", + ptr->name); return -ENXIO; } zr36050_wait_end(ptr); if ((ptr->status1 & 0x4) == 0) { - dprintk(1, - KERN_ERR - "%s: attach failed, jpeg processor failed (end flag)!\n", - ptr->name); + zrdev_err(zr, + "%s: attach failed, jpeg processor failed (end flag)!\n", + ptr->name); return -EBUSY; } @@ -179,10 +168,11 @@ static int zr36050_basic_test(struct zr36050 *ptr) static int zr36050_pushit(struct zr36050 *ptr, u16 startreg, u16 len, const char *data) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); int i = 0; - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, - startreg, len); + zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, + startreg, len); while (i < len) zr36050_write(ptr, startreg++, data[i++]); @@ -305,11 +295,12 @@ static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; static int zr36050_set_sof(struct zr36050 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char sof_data[34]; // max. size of register set int i; - dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, - ptr->width, ptr->height, NO_OF_COMPONENTS); + zrdev_dbg(zr, "%s: write SOF (%dx%d, %d components)\n", ptr->name, + ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; @@ -336,10 +327,11 @@ static int zr36050_set_sof(struct zr36050 *ptr) static int zr36050_set_sos(struct zr36050 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char sos_data[16]; // max. size of register set int i; - dprintk(3, "%s: write SOS\n", ptr->name); + zrdev_dbg(zr, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; @@ -363,9 +355,10 @@ static int zr36050_set_sos(struct zr36050 *ptr) static int zr36050_set_dri(struct zr36050 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char dri_data[6]; // max. 
size of register set - dprintk(3, "%s: write DRI\n", ptr->name); + zrdev_dbg(zr, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; @@ -387,9 +380,10 @@ static void zr36050_init(struct zr36050 *ptr) { int sum = 0; long bitcnt, tmp; + struct zoran *zr = videocodec_to_zoran(ptr->codec); if (ptr->mode == CODEC_DO_COMPRESSION) { - dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); + zrdev_dbg(zr, "%s: COMPRESSION SETUP\n", ptr->name); /* 050 communicates with 057 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR); @@ -419,7 +413,7 @@ static void zr36050_init(struct zr36050 *ptr) /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ - dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name); + zrdev_dbg(zr, "%s: write DQT, DHT, APP\n", ptr->name); sum += zr36050_pushit(ptr, ZR050_DQT_IDX, sizeof(zr36050_dqt), zr36050_dqt); sum += zr36050_pushit(ptr, ZR050_DHT_IDX, @@ -442,11 +436,11 @@ static void zr36050_init(struct zr36050 *ptr) zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", - ptr->name, ptr->status1); + zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n", + ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { - pr_err("%s: init aborted!\n", ptr->name); + zrdev_err(zr, "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } @@ -457,9 +451,9 @@ static void zr36050_init(struct zr36050 *ptr) bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; - dprintk(3, - "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", - ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); + zrdev_dbg(zr, + "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", + ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; @@ -470,8 +464,8 @@ static void zr36050_init(struct zr36050 *ptr) bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; - dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", - ptr->name, bitcnt, tmp); + zrdev_dbg(zr, "%s: code: nettobit=%ld, highnettobits=%ld\n", + ptr->name, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; @@ -489,7 +483,7 @@ static void zr36050_init(struct zr36050 *ptr) ((ptr->app.len > 0) ? ZR050_ME_APP : 0) | ((ptr->com.len > 0) ? 
ZR050_ME_COM : 0)); } else { - dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); + zrdev_dbg(zr, "%s: EXPANSION SETUP\n", ptr->name); /* 050 communicates with 055 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, @@ -502,7 +496,7 @@ static void zr36050_init(struct zr36050 *ptr) zr36050_write(ptr, ZR050_INT_REQ_0, 0); zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 - dprintk(3, "%s: write DHT\n", ptr->name); + zrdev_dbg(zr, "%s: write DHT\n", ptr->name); zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), zr36050_dht); @@ -511,11 +505,11 @@ static void zr36050_init(struct zr36050 *ptr) zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", - ptr->name, ptr->status1); + zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n", + ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { - pr_err("%s: init aborted!\n", ptr->name); + zrdev_err(zr, "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } @@ -539,8 +533,9 @@ static void zr36050_init(struct zr36050 *ptr) static int zr36050_set_mode(struct videocodec *codec, int mode) { struct zr36050 *ptr = (struct zr36050 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + zrdev_dbg(zr, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; @@ -556,12 +551,13 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36050 *ptr = (struct zr36050 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); int size; - dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", - ptr->name, norm->h_start, norm->v_start, - cap->x, cap->y, cap->width, cap->height, - cap->decimation, cap->quality); + zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", + ptr->name, norm->h_start, norm->v_start, + cap->x, cap->y, cap->width, cap->height, + cap->decimation, cap->quality); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... */ @@ -594,10 +590,11 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm static int zr36050_control(struct videocodec *codec, int type, int size, void *data) { struct zr36050 *ptr = (struct zr36050 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); int *ival = (int *)data; - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, - size); + zrdev_dbg(zr, "%s: control %d call with %d byte\n", ptr->name, type, + size); switch (type) { case CODEC_G_STATUS: /* get last status */ @@ -713,12 +710,13 @@ static int zr36050_control(struct videocodec *codec, int type, int size, void *d static int zr36050_unset(struct videocodec *codec) { struct zr36050 *ptr = codec->data; + struct zoran *zr = videocodec_to_zoran(codec); if (ptr) { /* do wee need some codec deinit here, too ???? 
*/ - dprintk(1, "%s: finished codec #%d\n", ptr->name, - ptr->num); + zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name, + ptr->num); kfree(ptr); codec->data = NULL; @@ -741,14 +739,15 @@ static int zr36050_unset(struct videocodec *codec) static int zr36050_setup(struct videocodec *codec) { struct zr36050 *ptr; + struct zoran *zr = videocodec_to_zoran(codec); int res; - dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n", - zr36050_codecs); + zrdev_dbg(zr, "zr36050: initializing MJPEG subsystem #%d.\n", + zr36050_codecs); if (zr36050_codecs == MAX_CODECS) { - dprintk(1, - KERN_ERR "zr36050: Can't attach more codecs!\n"); + zrdev_err(zr, + "zr36050: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init @@ -789,8 +788,8 @@ static int zr36050_setup(struct videocodec *codec) zr36050_init(ptr); - dprintk(1, KERN_INFO "%s: codec attached and running\n", - ptr->name); + zrdev_info(zr, "%s: codec attached and running\n", + ptr->name); return 0; } @@ -823,9 +822,8 @@ int zr36050_init_module(void) void zr36050_cleanup_module(void) { if (zr36050_codecs) { - dprintk(1, - "zr36050: something's wrong - %d codecs left somehow.\n", - zr36050_codecs); + pr_debug("zr36050: something's wrong - %d codecs left somehow.\n", + zr36050_codecs); } videocodec_unregister(&zr36050_codec); } diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c index d0c369e31c81..7798016f1f96 100644 --- a/drivers/staging/media/zoran/zr36060.c +++ b/drivers/staging/media/zoran/zr36060.c @@ -32,16 +32,6 @@ static bool low_bitrate; module_param(low_bitrate, bool, 0); MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); -static int zr36060_debug; -module_param(zr36060_debug, int, 0); -MODULE_PARM_DESC(zr36060_debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) \ - do { \ - if (zr36060_debug >= num) \ - printk(format, ##args); \ - } while (0) - /* ========================================================================= * Local hardware I/O functions: * read/write via codec layer (registers are located in the master device) @@ -51,25 +41,28 @@ MODULE_PARM_DESC(zr36060_debug, "Debug level (0-4)"); static u8 zr36060_read(struct zr36060 *ptr, u16 reg) { u8 value = 0; + struct zoran *zr = videocodec_to_zoran(ptr->codec); // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff; else - pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name); return value; } static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value) { - dprintk(4, "0x%02x @0x%04x\n", value, reg); + struct zoran *zr = videocodec_to_zoran(ptr->codec); + + zrdev_dbg(zr, "0x%02x @0x%04x\n", value, reg); // just in case something is wrong... 
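The conversion applied throughout zr36016.c, zr36050.c and zr36060.c follows the same two-step pattern: resolve the owning struct zoran once per function via videocodec_to_zoran() (or videocodec_master_to_zoran() on the master side) and log through the zrdev_* wrappers, so every message is prefixed with the PCI device and goes through dev_dbg()/dev_err() instead of a per-module debug level. A minimal sketch of that pattern, where zr36060_example() is an invented function name used only for illustration:

static void zr36060_example(struct zr36060 *ptr)
{
	/* Resolve the zoran device that owns this codec instance ... */
	struct zoran *zr = videocodec_to_zoran(ptr->codec);

	/* ... and log through it, as the converted helpers above do. */
	zrdev_dbg(zr, "%s: something noteworthy happened\n", ptr->name);
}

With the zr36016_debug/zr36050_debug/zr36060_debug module parameters gone, these messages would normally be enabled at run time via dynamic debug on a CONFIG_DYNAMIC_DEBUG kernel (for example, echo 'file zr36060.c +p' > /sys/kernel/debug/dynamic_debug/control), since pci_dbg() is a thin wrapper around dev_dbg().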
if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else - pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name); + zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= @@ -101,14 +94,15 @@ static u16 zr36060_read_scalefactor(struct zr36060 *ptr) /* wait if codec is ready to proceed (end of processing) or time is over */ static void zr36060_wait_end(struct zr36060 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); int i = 0; while (zr36060_read_status(ptr) & ZR060_CFSR_BUSY) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! - dprintk(1, - "%s: timeout at wait_end (last status: 0x%02x)\n", - ptr->name, ptr->status); + zrdev_dbg(zr, + "%s: timeout at wait_end (last status: 0x%02x)\n", + ptr->name, ptr->status); break; } } @@ -117,15 +111,17 @@ static void zr36060_wait_end(struct zr36060 *ptr) /* Basic test of "connectivity", writes/reads to/from memory the SOF marker */ static int zr36060_basic_test(struct zr36060 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); + if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { - pr_err("%s: attach failed, can't connect to jpeg processor!\n", ptr->name); + zrdev_err(zr, "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36060_wait_end(ptr); if (ptr->status & ZR060_CFSR_BUSY) { - pr_err("%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); + zrdev_err(zr, "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } @@ -135,10 +131,11 @@ static int zr36060_basic_test(struct zr36060 *ptr) /* simple loop for pushing the init datasets */ static int zr36060_pushit(struct zr36060 *ptr, u16 startreg, u16 len, const char *data) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); int i = 0; - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, - startreg, len); + zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, + startreg, len); while (i < len) zr36060_write(ptr, startreg++, data[i++]); @@ -249,11 +246,12 @@ static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36060_set_sof(struct zr36060 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char sof_data[34]; // max. size of register set int i; - dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, - ptr->width, ptr->height, NO_OF_COMPONENTS); + zrdev_dbg(zr, "%s: write SOF (%dx%d, %d components)\n", ptr->name, + ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; @@ -277,10 +275,11 @@ static int zr36060_set_sof(struct zr36060 *ptr) /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36060_set_sos(struct zr36060 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char sos_data[16]; // max. 
size of register set int i; - dprintk(3, "%s: write SOS\n", ptr->name); + zrdev_dbg(zr, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; @@ -302,9 +301,10 @@ static int zr36060_set_sos(struct zr36060 *ptr) /* DRI (define restart interval) */ static int zr36060_set_dri(struct zr36060 *ptr) { + struct zoran *zr = videocodec_to_zoran(ptr->codec); char dri_data[6]; // max. size of register set - dprintk(3, "%s: write DRI\n", ptr->name); + zrdev_dbg(zr, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; @@ -321,9 +321,10 @@ static void zr36060_init(struct zr36060 *ptr) { int sum = 0; long bitcnt, tmp; + struct zoran *zr = videocodec_to_zoran(ptr->codec); if (ptr->mode == CODEC_DO_COMPRESSION) { - dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); + zrdev_dbg(zr, "%s: COMPRESSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST); @@ -376,9 +377,9 @@ static void zr36060_init(struct zr36060 *ptr) bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; - dprintk(3, - "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", - ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); + zrdev_dbg(zr, + "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", + ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; @@ -389,8 +390,8 @@ static void zr36060_init(struct zr36060 *ptr) bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; - dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", - ptr->name, bitcnt, tmp); + zrdev_dbg(zr, "%s: code: nettobit=%ld, highnettobits=%ld\n", + ptr->name, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; @@ -408,7 +409,7 @@ static void zr36060_init(struct zr36060 *ptr) zr36060_write(ptr, ZR060_VCR, ZR060_VCR_RANGE); } else { - dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); + zrdev_dbg(zr, "%s: EXPANSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST); @@ -441,10 +442,11 @@ static void zr36060_init(struct zr36060 *ptr) /* Load the tables */ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST | ZR060_LOAD_LOAD); zr36060_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status); + zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n", + ptr->name, ptr->status); if (ptr->status & ZR060_CFSR_BUSY) { - pr_err("%s: init aborted!\n", ptr->name); + zrdev_err(zr, "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! 
} } @@ -461,8 +463,9 @@ static void zr36060_init(struct zr36060 *ptr) static int zr36060_set_mode(struct videocodec *codec, int mode) { struct zr36060 *ptr = (struct zr36060 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + zrdev_dbg(zr, "%s: set_mode %d call\n", ptr->name, mode); if (mode != CODEC_DO_EXPANSION && mode != CODEC_DO_COMPRESSION) return -EINVAL; @@ -478,11 +481,12 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36060 *ptr = (struct zr36060 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); u32 reg; int size; - dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, - cap->x, cap->y, cap->width, cap->height, cap->decimation); + zrdev_dbg(zr, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, + cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so @@ -637,10 +641,11 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm static int zr36060_control(struct videocodec *codec, int type, int size, void *data) { struct zr36060 *ptr = (struct zr36060 *)codec->data; + struct zoran *zr = videocodec_to_zoran(codec); int *ival = (int *)data; - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, - size); + zrdev_dbg(zr, "%s: control %d call with %d byte\n", ptr->name, type, + size); switch (type) { case CODEC_G_STATUS: /* get last status */ @@ -753,11 +758,12 @@ static int zr36060_control(struct videocodec *codec, int type, int size, void *d static int zr36060_unset(struct videocodec *codec) { struct zr36060 *ptr = codec->data; + struct zoran *zr = videocodec_to_zoran(codec); if (ptr) { /* do wee need some codec deinit here, too ???? 
*/ - dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); + zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; @@ -778,12 +784,14 @@ static int zr36060_unset(struct videocodec *codec) static int zr36060_setup(struct videocodec *codec) { struct zr36060 *ptr; + struct zoran *zr = videocodec_to_zoran(codec); int res; - dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs); + zrdev_dbg(zr, "zr36060: initializing MJPEG subsystem #%d.\n", + zr36060_codecs); if (zr36060_codecs == MAX_CODECS) { - pr_err("zr36060: Can't attach more codecs!\n"); + zrdev_err(zr, "zr36060: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init @@ -823,7 +831,7 @@ static int zr36060_setup(struct videocodec *codec) zr36060_init(ptr); - dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); + zrdev_info(zr, "%s: codec attached and running\n", ptr->name); return 0; } @@ -852,9 +860,8 @@ int zr36060_init_module(void) void zr36060_cleanup_module(void) { if (zr36060_codecs) { - dprintk(1, - "zr36060: something's wrong - %d codecs left somehow.\n", - zr36060_codecs); + pr_debug("zr36060: something's wrong - %d codecs left somehow.\n", + zr36060_codecs); } /* however, we can't just stay alive */ diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig deleted file mode 100644 index 6a5d842ee0f2..000000000000 --- a/drivers/staging/octeon-usb/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config OCTEON_USB - tristate "Cavium Networks Octeon USB support" - depends on CAVIUM_OCTEON_SOC && USB - help - This driver supports USB host controller on some Cavium - Networks' products in the Octeon family. - - To compile this driver as a module, choose M here. The module - will be called octeon-hcd. - diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile deleted file mode 100644 index 9873a0130ad5..000000000000 --- a/drivers/staging/octeon-usb/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-${CONFIG_OCTEON_USB} := octeon-hcd.o diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO deleted file mode 100644 index 2b29acca5caa..000000000000 --- a/drivers/staging/octeon-usb/TODO +++ /dev/null @@ -1,8 +0,0 @@ -This driver is functional and has been tested on EdgeRouter Lite, -D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage. - -TODO: - - kernel coding style - - checkpatch warnings - -Contact: Aaro Koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c deleted file mode 100644 index a1cd81d4a114..000000000000 --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ /dev/null @@ -1,3740 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2008 Cavium Networks - * - * Some parts of the code were originally released under BSD license: - * - * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights - * reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Networks nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its associated - * regulations, and may be subject to export or import regulations in other - * countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR - * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO - * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION - * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM - * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, - * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF - * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR - * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR - * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. - */ - -#include <linux/usb.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/usb/hcd.h> -#include <linux/prefetch.h> -#include <linux/irqdomain.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> -#include <linux/of.h> - -#include <asm/octeon/octeon.h> - -#include "octeon-hcd.h" - -/** - * enum cvmx_usb_speed - the possible USB device speeds - * - * @CVMX_USB_SPEED_HIGH: Device is operation at 480Mbps - * @CVMX_USB_SPEED_FULL: Device is operation at 12Mbps - * @CVMX_USB_SPEED_LOW: Device is operation at 1.5Mbps - */ -enum cvmx_usb_speed { - CVMX_USB_SPEED_HIGH = 0, - CVMX_USB_SPEED_FULL = 1, - CVMX_USB_SPEED_LOW = 2, -}; - -/** - * enum cvmx_usb_transfer - the possible USB transfer types - * - * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status - * transfers - * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low - * priority periodic transfers - * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority - * transfers - * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority - * periodic transfers - */ -enum cvmx_usb_transfer { - CVMX_USB_TRANSFER_CONTROL = 0, - CVMX_USB_TRANSFER_ISOCHRONOUS = 1, - CVMX_USB_TRANSFER_BULK = 2, - CVMX_USB_TRANSFER_INTERRUPT = 3, -}; - -/** - * enum cvmx_usb_direction - the transfer directions - * - * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host - * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon - */ -enum cvmx_usb_direction { - CVMX_USB_DIRECTION_OUT, - CVMX_USB_DIRECTION_IN, -}; - -/** - * enum cvmx_usb_status - possible callback function status codes - * - * @CVMX_USB_STATUS_OK: The transaction / operation finished without - * any errors - * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented - * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight - * by a user call to cvmx_usb_cancel - * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected - * error status - * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response 
- * from the device - * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the - * device even after a number of retries - * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle - * error even after a number of retries - * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error - * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error - * even after a number of retries - */ -enum cvmx_usb_status { - CVMX_USB_STATUS_OK, - CVMX_USB_STATUS_SHORT, - CVMX_USB_STATUS_CANCEL, - CVMX_USB_STATUS_ERROR, - CVMX_USB_STATUS_STALL, - CVMX_USB_STATUS_XACTERR, - CVMX_USB_STATUS_DATATGLERR, - CVMX_USB_STATUS_BABBLEERR, - CVMX_USB_STATUS_FRAMEERR, -}; - -/** - * struct cvmx_usb_port_status - the USB port status information - * - * @port_enabled: 1 = Usb port is enabled, 0 = disabled - * @port_over_current: 1 = Over current detected, 0 = Over current not - * detected. Octeon doesn't support over current detection. - * @port_powered: 1 = Port power is being supplied to the device, 0 = - * power is off. Octeon doesn't support turning port power - * off. - * @port_speed: Current port speed. - * @connected: 1 = A device is connected to the port, 0 = No device is - * connected. - * @connect_change: 1 = Device connected state changed since the last set - * status call. - */ -struct cvmx_usb_port_status { - u32 reserved : 25; - u32 port_enabled : 1; - u32 port_over_current : 1; - u32 port_powered : 1; - enum cvmx_usb_speed port_speed : 2; - u32 connected : 1; - u32 connect_change : 1; -}; - -/** - * struct cvmx_usb_iso_packet - descriptor for Isochronous packets - * - * @offset: This is the offset in bytes into the main buffer where this data - * is stored. - * @length: This is the length in bytes of the data. - * @status: This is the status of this individual packet transfer. - */ -struct cvmx_usb_iso_packet { - int offset; - int length; - enum cvmx_usb_status status; -}; - -/** - * enum cvmx_usb_initialize_flags - flags used by the initialization function - * - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal - * as clock source at USB_XO and - * USB_XI. - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V - * board clock source at USB_XO. - * USB_XI should be tied to GND. - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or - * crystal - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock - * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock - * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and used polled IO for - * data transfer use for the USB - */ -enum cvmx_usb_initialize_flags { - CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0, - CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1, - CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3, - CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3, - CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3, - CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3, - /* Bits 3-4 used to encode the clock frequency */ - CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5, -}; - -/** - * enum cvmx_usb_pipe_flags - internal flags for a pipe. - * - * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is - * actively using hardware. - * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed - * pipe is in the ping state. 
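Aside, not from the removed file: in the cvmx_usb_initialize_flags enum above, bits 3-4 carry the reference-clock speed while the XO_XI/XO_GND bits pick the clock source. A decode helper written purely from the constants shown there; flags_to_clock_mhz() is an illustrative name, not something the driver defined:

static int flags_to_clock_mhz(int init_flags)
{
        switch (init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
        case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:     /* 1 << 3 */
                return 12;
        case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:     /* 2 << 3 */
                return 24;
        case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:     /* 3 << 3 */
                return 48;
        default:                                        /* speed field left at 0 */
                return 0;
        }
}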
- */ -enum cvmx_usb_pipe_flags { - CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17, - CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18, -}; - -/* Maximum number of times to retry failed transactions */ -#define MAX_RETRIES 3 - -/* Maximum number of hardware channels supported by the USB block */ -#define MAX_CHANNELS 8 - -/* - * The low level hardware can transfer a maximum of this number of bytes in each - * transfer. The field is 19 bits wide - */ -#define MAX_TRANSFER_BYTES ((1 << 19) - 1) - -/* - * The low level hardware can transfer a maximum of this number of packets in - * each transfer. The field is 10 bits wide - */ -#define MAX_TRANSFER_PACKETS ((1 << 10) - 1) - -/** - * Logical transactions may take numerous low level - * transactions, especially when splits are concerned. This - * enum represents all of the possible stages a transaction can - * be in. Note that split completes are always even. This is so - * the NAK handler can backup to the previous low level - * transaction with a simple clearing of bit 0. - */ -enum cvmx_usb_stage { - CVMX_USB_STAGE_NON_CONTROL, - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE, - CVMX_USB_STAGE_SETUP, - CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE, - CVMX_USB_STAGE_DATA, - CVMX_USB_STAGE_DATA_SPLIT_COMPLETE, - CVMX_USB_STAGE_STATUS, - CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE, -}; - -/** - * struct cvmx_usb_transaction - describes each pending USB transaction - * regardless of type. These are linked together - * to form a list of pending requests for a pipe. - * - * @node: List node for transactions in the pipe. - * @type: Type of transaction, duplicated of the pipe. - * @flags: State flags for this transaction. - * @buffer: User's physical buffer address to read/write. - * @buffer_length: Size of the user's buffer in bytes. - * @control_header: For control transactions, physical address of the 8 - * byte standard header. - * @iso_start_frame: For ISO transactions, the starting frame number. - * @iso_number_packets: For ISO transactions, the number of packets in the - * request. - * @iso_packets: For ISO transactions, the sub packets in the request. - * @actual_bytes: Actual bytes transfer for this transaction. - * @stage: For control transactions, the current stage. - * @urb: URB. - */ -struct cvmx_usb_transaction { - struct list_head node; - enum cvmx_usb_transfer type; - u64 buffer; - int buffer_length; - u64 control_header; - int iso_start_frame; - int iso_number_packets; - struct cvmx_usb_iso_packet *iso_packets; - int xfersize; - int pktcnt; - int retries; - int actual_bytes; - enum cvmx_usb_stage stage; - struct urb *urb; -}; - -/** - * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon - * and some USB device. It contains a list of pending - * request to the device. - * - * @node: List node for pipe list - * @next: Pipe after this one in the list - * @transactions: List of pending transactions - * @interval: For periodic pipes, the interval between packets in - * frames - * @next_tx_frame: The next frame this pipe is allowed to transmit on - * @flags: State flags for this pipe - * @device_speed: Speed of device connected to this pipe - * @transfer_type: Type of transaction supported by this pipe - * @transfer_dir: IN or OUT. 
Ignored for Control - * @multi_count: Max packet in a row for the device - * @max_packet: The device's maximum packet size in bytes - * @device_addr: USB device address at other end of pipe - * @endpoint_num: USB endpoint number at other end of pipe - * @hub_device_addr: Hub address this device is connected to - * @hub_port: Hub port this device is connected to - * @pid_toggle: This toggles between 0/1 on every packet send to track - * the data pid needed - * @channel: Hardware DMA channel for this pipe - * @split_sc_frame: The low order bits of the frame number the split - * complete should be sent on - */ -struct cvmx_usb_pipe { - struct list_head node; - struct list_head transactions; - u64 interval; - u64 next_tx_frame; - enum cvmx_usb_pipe_flags flags; - enum cvmx_usb_speed device_speed; - enum cvmx_usb_transfer transfer_type; - enum cvmx_usb_direction transfer_dir; - int multi_count; - u16 max_packet; - u8 device_addr; - u8 endpoint_num; - u8 hub_device_addr; - u8 hub_port; - u8 pid_toggle; - u8 channel; - s8 split_sc_frame; -}; - -struct cvmx_usb_tx_fifo { - struct { - int channel; - int size; - u64 address; - } entry[MAX_CHANNELS + 1]; - int head; - int tail; -}; - -/** - * struct octeon_hcd - the state of the USB block - * - * lock: Serialization lock. - * init_flags: Flags passed to initialize. - * index: Which USB block this is for. - * idle_hardware_channels: Bit set for every idle hardware channel. - * usbcx_hprt: Stored port status so we don't need to read a CSR to - * determine splits. - * pipe_for_channel: Map channels to pipes. - * pipe: Storage for pipes. - * indent: Used by debug output to indent functions. - * port_status: Last port status used for change notification. - * idle_pipes: List of open pipes that have no transactions. - * active_pipes: Active pipes indexed by transfer type. - * frame_number: Increments every SOF interrupt for time keeping. - * active_split: Points to the current active split, or NULL. - */ -struct octeon_hcd { - spinlock_t lock; /* serialization lock */ - int init_flags; - int index; - int idle_hardware_channels; - union cvmx_usbcx_hprt usbcx_hprt; - struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS]; - int indent; - struct cvmx_usb_port_status port_status; - struct list_head idle_pipes; - struct list_head active_pipes[4]; - u64 frame_number; - struct cvmx_usb_transaction *active_split; - struct cvmx_usb_tx_fifo periodic; - struct cvmx_usb_tx_fifo nonperiodic; -}; - -/* - * This macro logically sets a single field in a CSR. It does the sequence - * read, modify, and write - */ -#define USB_SET_FIELD32(address, _union, field, value) \ - do { \ - union _union c; \ - \ - c.u32 = cvmx_usb_read_csr32(usb, address); \ - c.s.field = value; \ - cvmx_usb_write_csr32(usb, address, c.u32); \ - } while (0) - -/* Returns the IO address to push/pop stuff data from the FIFOs */ -#define USB_FIFO_ADDRESS(channel, usb_index) \ - (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000) - -/** - * struct octeon_temp_buffer - a bounce buffer for USB transfers - * @orig_buffer: the original buffer passed by the USB stack - * @data: the newly allocated temporary buffer (excluding meta-data) - * - * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If - * the buffer is too short, we need to allocate a temporary one, and this struct - * represents it. 
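Aside, not from the removed driver: the rule that octeon_alloc_temp_buffer() (a little further down) applies boils down to a predicate plus a size computation. A bounce buffer is needed only when the HCD itself maps the buffer and the length is not already a whole number of 32-bit words, and the temporary allocation is the payload rounded up to 4 bytes plus the small header that remembers the original pointer. A plain-C restatement under those assumptions; the helper names are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool needs_bounce(bool has_sg, bool premapped, size_t len)
{
        /* scatter-gather URBs and pre-mapped (URB_NO_TRANSFER_DMA_MAP) buffers
         * are left alone; everything else bounces when the length is not a
         * multiple of 4 bytes */
        return !has_sg && !premapped && (len % sizeof(uint32_t)) != 0;
}

static size_t bounce_alloc_size(size_t len, size_t header_size)
{
        /* payload padded to a 32-bit boundary, plus the bookkeeping header */
        size_t aligned = (len + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);

        return header_size + aligned;
}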
- */ -struct octeon_temp_buffer { - void *orig_buffer; - u8 data[]; -}; - -static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p) -{ - return container_of((void *)p, struct usb_hcd, hcd_priv); -} - -/** - * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer - * (if needed) - * @urb: URB. - * @mem_flags: Memory allocation flags. - * - * This function allocates a temporary bounce buffer whenever it's needed - * due to HW limitations. - */ -static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) -{ - struct octeon_temp_buffer *temp; - - if (urb->num_sgs || urb->sg || - (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || - !(urb->transfer_buffer_length % sizeof(u32))) - return 0; - - temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) + - sizeof(*temp), mem_flags); - if (!temp) - return -ENOMEM; - - temp->orig_buffer = urb->transfer_buffer; - if (usb_urb_dir_out(urb)) - memcpy(temp->data, urb->transfer_buffer, - urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; - urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; - - return 0; -} - -/** - * octeon_free_temp_buffer - free a temporary buffer used by USB transfers. - * @urb: URB. - * - * Frees a buffer allocated by octeon_alloc_temp_buffer(). - */ -static void octeon_free_temp_buffer(struct urb *urb) -{ - struct octeon_temp_buffer *temp; - size_t length; - - if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) - return; - - temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer, - data); - if (usb_urb_dir_in(urb)) { - if (usb_pipeisoc(urb->pipe)) - length = urb->transfer_buffer_length; - else - length = urb->actual_length; - - memcpy(temp->orig_buffer, urb->transfer_buffer, length); - } - urb->transfer_buffer = temp->orig_buffer; - urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; - kfree(temp); -} - -/** - * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma(). - * @hcd: USB HCD structure. - * @urb: URB. - * @mem_flags: Memory allocation flags. - */ -static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, - gfp_t mem_flags) -{ - int ret; - - ret = octeon_alloc_temp_buffer(urb, mem_flags); - if (ret) - return ret; - - ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); - if (ret) - octeon_free_temp_buffer(urb); - - return ret; -} - -/** - * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma() - * @hcd: USB HCD structure. - * @urb: URB. - */ -static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) -{ - usb_hcd_unmap_urb_for_dma(hcd, urb); - octeon_free_temp_buffer(urb); -} - -/** - * Read a USB 32bit CSR. It performs the necessary address swizzle - * for 32bit CSRs and logs the value in a readable format if - * debugging is on. - * - * @usb: USB block this access is for - * @address: 64bit address to read - * - * Returns: Result of the read - */ -static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address) -{ - return cvmx_read64_uint32(address ^ 4); -} - -/** - * Write a USB 32bit CSR. It performs the necessary address - * swizzle for 32bit CSRs and logs the value in a readable format - * if debugging is on. 
- * - * @usb: USB block this access is for - * @address: 64bit address to write - * @value: Value to write - */ -static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb, - u64 address, u32 value) -{ - cvmx_write64_uint32(address ^ 4, value); - cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); -} - -/** - * Return non zero if this pipe connects to a non HIGH speed - * device through a high speed hub. - * - * @usb: USB block this access is for - * @pipe: Pipe to check - * - * Returns: Non zero if we need to do split transactions - */ -static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - return pipe->device_speed != CVMX_USB_SPEED_HIGH && - usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH; -} - -/** - * Trivial utility function to return the correct PID for a pipe - * - * @pipe: pipe to check - * - * Returns: PID for pipe - */ -static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) -{ - if (pipe->pid_toggle) - return 2; /* Data1 */ - return 0; /* Data0 */ -} - -/* Loops through register until txfflsh or rxfflsh become zero.*/ -static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type) -{ - int result; - u64 address = CVMX_USBCX_GRSTCTL(usb->index); - u64 done = cvmx_get_cycle() + 100 * - (u64)octeon_get_clock_rate / 1000000; - union cvmx_usbcx_grstctl c; - - while (1) { - c.u32 = cvmx_usb_read_csr32(usb, address); - if (fflsh_type == 0 && c.s.txfflsh == 0) { - result = 0; - break; - } else if (fflsh_type == 1 && c.s.rxfflsh == 0) { - result = 0; - break; - } else if (cvmx_get_cycle() > done) { - result = -1; - break; - } - - __delay(100); - } - return result; -} - -static void cvmx_fifo_setup(struct octeon_hcd *usb) -{ - union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3; - union cvmx_usbcx_gnptxfsiz npsiz; - union cvmx_usbcx_hptxfsiz psiz; - - usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GHWCFG3(usb->index)); - - /* - * Program the USBC_GRXFSIZ register to select the size of the receive - * FIFO (25%). - */ - USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz, - rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4); - - /* - * Program the USBC_GNPTXFSIZ register to select the size and the start - * address of the non-periodic transmit FIFO for nonperiodic - * transactions (50%). - */ - npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index)); - npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2; - npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4; - cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32); - - /* - * Program the USBC_HPTXFSIZ register to select the size and start - * address of the periodic transmit FIFO for periodic transactions - * (25%). - */ - psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index)); - psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4; - psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4; - cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32); - - /* Flush all FIFOs */ - USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), - cvmx_usbcx_grstctl, txfnum, 0x10); - USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), - cvmx_usbcx_grstctl, txfflsh, 1); - cvmx_wait_tx_rx(usb, 0); - USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), - cvmx_usbcx_grstctl, rxfflsh, 1); - cvmx_wait_tx_rx(usb, 1); -} - -/** - * Shutdown a USB port after a call to cvmx_usb_initialize(). - * The port should be disabled with all pipes closed when this - * function is called. - * - * @usb: USB device state populated by cvmx_usb_initialize(). 
- * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_shutdown(struct octeon_hcd *usb) -{ - union cvmx_usbnx_clk_ctl usbn_clk_ctl; - - /* Make sure all pipes are closed */ - if (!list_empty(&usb->idle_pipes) || - !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) || - !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) || - !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) || - !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK])) - return -EBUSY; - - /* Disable the clocks and put them in power on reset */ - usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); - usbn_clk_ctl.s.enable = 1; - usbn_clk_ctl.s.por = 1; - usbn_clk_ctl.s.hclk_rst = 1; - usbn_clk_ctl.s.prst = 0; - usbn_clk_ctl.s.hrst = 0; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - return 0; -} - -/** - * Initialize a USB port for use. This must be called before any - * other access to the Octeon USB port is made. The port starts - * off in the disabled state. - * - * @dev: Pointer to struct device for logging purposes. - * @usb: Pointer to struct octeon_hcd. - * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_initialize(struct device *dev, - struct octeon_hcd *usb) -{ - int channel; - int divisor; - int retries = 0; - union cvmx_usbcx_hcfg usbcx_hcfg; - union cvmx_usbnx_clk_ctl usbn_clk_ctl; - union cvmx_usbcx_gintsts usbc_gintsts; - union cvmx_usbcx_gahbcfg usbcx_gahbcfg; - union cvmx_usbcx_gintmsk usbcx_gintmsk; - union cvmx_usbcx_gusbcfg usbcx_gusbcfg; - union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; - -retry: - /* - * Power On Reset and PHY Initialization - * - * 1. Wait for DCOK to assert (nothing to do) - * - * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and - * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 - */ - usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); - usbn_clk_ctl.s.por = 1; - usbn_clk_ctl.s.hrst = 0; - usbn_clk_ctl.s.prst = 0; - usbn_clk_ctl.s.hclk_rst = 0; - usbn_clk_ctl.s.enable = 0; - /* - * 2b. Select the USB reference clock/crystal parameters by writing - * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] - */ - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) { - /* - * The USB port uses 12/24/48MHz 2.5V board clock - * source at USB_XO. USB_XI should be tied to GND. - * Most Octeon evaluation boards require this setting - */ - if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || - OCTEON_IS_MODEL(OCTEON_CN56XX) || - OCTEON_IS_MODEL(OCTEON_CN50XX)) - /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */ - usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */ - else - /* From CN52XX manual */ - usbn_clk_ctl.s.p_rtype = 1; - - switch (usb->init_flags & - CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) { - case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: - usbn_clk_ctl.s.p_c_sel = 0; - break; - case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: - usbn_clk_ctl.s.p_c_sel = 1; - break; - case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: - usbn_clk_ctl.s.p_c_sel = 2; - break; - } - } else { - /* - * The USB port uses a 12MHz crystal as clock source - * at USB_XO and USB_XI - */ - if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) - /* From CN31XX,CN30XX manual */ - usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */ - else - /* From CN56XX,CN52XX,CN50XX manuals. */ - usbn_clk_ctl.s.p_rtype = 0; - - usbn_clk_ctl.s.p_c_sel = 0; - } - /* - * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and - * setting USBN0/1_CLK_CTL[ENABLE] = 1. 
Divide the core clock down - * such that USB is as close as possible to 125Mhz - */ - divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000); - /* Lower than 4 doesn't seem to work properly */ - if (divisor < 4) - divisor = 4; - usbn_clk_ctl.s.divide = divisor; - usbn_clk_ctl.s.divide2 = 0; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - - /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */ - usbn_clk_ctl.s.hclk_rst = 1; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */ - __delay(64); - /* - * 3. Program the power-on reset field in the USBN clock-control - * register: - * USBN_CLK_CTL[POR] = 0 - */ - usbn_clk_ctl.s.por = 0; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - /* 4. Wait 1 ms for PHY clock to start */ - mdelay(1); - /* - * 5. Program the Reset input from automatic test equipment field in the - * USBP control and status register: - * USBN_USBP_CTL_STATUS[ATE_RESET] = 1 - */ - usbn_usbp_ctl_status.u64 = - cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index)); - usbn_usbp_ctl_status.s.ate_reset = 1; - cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), - usbn_usbp_ctl_status.u64); - /* 6. Wait 10 cycles */ - __delay(10); - /* - * 7. Clear ATE_RESET field in the USBN clock-control register: - * USBN_USBP_CTL_STATUS[ATE_RESET] = 0 - */ - usbn_usbp_ctl_status.s.ate_reset = 0; - cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), - usbn_usbp_ctl_status.u64); - /* - * 8. Program the PHY reset field in the USBN clock-control register: - * USBN_CLK_CTL[PRST] = 1 - */ - usbn_clk_ctl.s.prst = 1; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - /* - * 9. Program the USBP control and status register to select host or - * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for - * device - */ - usbn_usbp_ctl_status.s.hst_mode = 0; - cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), - usbn_usbp_ctl_status.u64); - /* 10. Wait 1 us */ - udelay(1); - /* - * 11. Program the hreset_n field in the USBN clock-control register: - * USBN_CLK_CTL[HRST] = 1 - */ - usbn_clk_ctl.s.hrst = 1; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - /* 12. Proceed to USB core initialization */ - usbn_clk_ctl.s.enable = 1; - cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); - udelay(1); - - /* - * USB Core Initialization - * - * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to - * determine USB core configuration parameters. - * - * Nothing needed - * - * 2. Program the following fields in the global AHB configuration - * register (USBC_GAHBCFG) - * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode - * Burst length, USBC_GAHBCFG[HBSTLEN] = 0 - * Nonperiodic TxFIFO empty level (slave mode only), - * USBC_GAHBCFG[NPTXFEMPLVL] - * Periodic TxFIFO empty level (slave mode only), - * USBC_GAHBCFG[PTXFEMPLVL] - * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 - */ - usbcx_gahbcfg.u32 = 0; - usbcx_gahbcfg.s.dmaen = !(usb->init_flags & - CVMX_USB_INITIALIZE_FLAGS_NO_DMA); - usbcx_gahbcfg.s.hbstlen = 0; - usbcx_gahbcfg.s.nptxfemplvl = 1; - usbcx_gahbcfg.s.ptxfemplvl = 1; - usbcx_gahbcfg.s.glblintrmsk = 1; - cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), - usbcx_gahbcfg.u32); - - /* - * 3. Program the following fields in USBC_GUSBCFG register. 
- * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0 - * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0 - * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5 - * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 - */ - usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GUSBCFG(usb->index)); - usbcx_gusbcfg.s.toutcal = 0; - usbcx_gusbcfg.s.ddrsel = 0; - usbcx_gusbcfg.s.usbtrdtim = 0x5; - usbcx_gusbcfg.s.phylpwrclksel = 0; - cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), - usbcx_gusbcfg.u32); - - /* - * 4. The software must unmask the following bits in the USBC_GINTMSK - * register. - * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1 - * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1 - */ - usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GINTMSK(usb->index)); - usbcx_gintmsk.s.otgintmsk = 1; - usbcx_gintmsk.s.modemismsk = 1; - usbcx_gintmsk.s.hchintmsk = 1; - usbcx_gintmsk.s.sofmsk = 0; - /* We need RX FIFO interrupts if we don't have DMA */ - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) - usbcx_gintmsk.s.rxflvlmsk = 1; - cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), - usbcx_gintmsk.u32); - - /* - * Disable all channel interrupts. We'll enable them per channel later. - */ - for (channel = 0; channel < 8; channel++) - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTMSKX(channel, usb->index), - 0); - - /* - * Host Port Initialization - * - * 1. Program the host-port interrupt-mask field to unmask, - * USBC_GINTMSK[PRTINT] = 1 - */ - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, prtintmsk, 1); - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, disconnintmsk, 1); - - /* - * 2. Program the USBC_HCFG register to select full-speed host - * or high-speed host. - */ - usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index)); - usbcx_hcfg.s.fslssupp = 0; - usbcx_hcfg.s.fslspclksel = 0; - cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32); - - cvmx_fifo_setup(usb); - - /* - * If the controller is getting port events right after the reset, it - * means the initialization failed. Try resetting the controller again - * in such case. This is seen to happen after cold boot on DSR-1000N. - */ - usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GINTSTS(usb->index)); - cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), - usbc_gintsts.u32); - dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32); - if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint) - return 0; - if (retries++ >= 5) - return -EAGAIN; - dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n", - (int)usbc_gintsts.u32); - msleep(50); - cvmx_usb_shutdown(usb); - msleep(50); - goto retry; -} - -/** - * Reset a USB port. After this call succeeds, the USB port is - * online and servicing requests. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - */ -static void cvmx_usb_reset_port(struct octeon_hcd *usb) -{ - usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HPRT(usb->index)); - - /* Program the port reset bit to start the reset process */ - USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, - prtrst, 1); - - /* - * Wait at least 50ms (high speed), or 10ms (full speed) for the reset - * process to complete. 
- */ - mdelay(50); - - /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */ - USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, - prtrst, 0); - - /* - * Read the port speed field to get the enumerated speed, - * USBC_HPRT[PRTSPD]. - */ - usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HPRT(usb->index)); -} - -/** - * Disable a USB port. After this call the USB port will not - * generate data transfers and will not generate events. - * Transactions in process will fail and call their - * associated callbacks. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_disable(struct octeon_hcd *usb) -{ - /* Disable the port */ - USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, - prtena, 1); - return 0; -} - -/** - * Get the current state of the USB port. Use this call to - * determine if the usb port has anything connected, is enabled, - * or has some sort of error condition. The return value of this - * call has "changed" bits to signal of the value of some fields - * have changed between calls. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * - * Returns: Port status information - */ -static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hprt usbc_hprt; - struct cvmx_usb_port_status result; - - memset(&result, 0, sizeof(result)); - - usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); - result.port_enabled = usbc_hprt.s.prtena; - result.port_over_current = usbc_hprt.s.prtovrcurract; - result.port_powered = usbc_hprt.s.prtpwr; - result.port_speed = usbc_hprt.s.prtspd; - result.connected = usbc_hprt.s.prtconnsts; - result.connect_change = - result.connected != usb->port_status.connected; - - return result; -} - -/** - * Open a virtual pipe between the host and a USB device. A pipe - * must be opened before data can be transferred between a device - * and Octeon. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @device_addr: - * USB device address to open the pipe to - * (0-127). - * @endpoint_num: - * USB endpoint number to open the pipe to - * (0-15). - * @device_speed: - * The speed of the device the pipe is going - * to. This must match the device's speed, - * which may be different than the port speed. - * @max_packet: The maximum packet length the device can - * transmit/receive (low speed=0-8, full - * speed=0-1023, high speed=0-1024). This value - * comes from the standard endpoint descriptor - * field wMaxPacketSize bits <10:0>. - * @transfer_type: - * The type of transfer this pipe is for. - * @transfer_dir: - * The direction the pipe is in. This is not - * used for control pipes. - * @interval: For ISOCHRONOUS and INTERRUPT transfers, - * this is how often the transfer is scheduled - * for. All other transfers should specify - * zero. The units are in frames (8000/sec at - * high speed, 1000/sec for full speed). - * @multi_count: - * For high speed devices, this is the maximum - * allowed number of packet per microframe. - * Specify zero for non high speed devices. This - * value comes from the standard endpoint descriptor - * field wMaxPacketSize bits <12:11>. - * @hub_device_addr: - * Hub device address this device is connected - * to. Devices connected directly to Octeon - * use zero. This is only used when the device - * is full/low speed behind a high speed hub. - * The address will be of the high speed hub, - * not and full speed hubs after it. 
- * @hub_port: Which port on the hub the device is - * connected. Use zero for devices connected - * directly to Octeon. Like hub_device_addr, - * this is only used for full/low speed - * devices behind a high speed hub. - * - * Returns: A non-NULL value is a pipe. NULL means an error. - */ -static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb, - int device_addr, - int endpoint_num, - enum cvmx_usb_speed - device_speed, - int max_packet, - enum cvmx_usb_transfer - transfer_type, - enum cvmx_usb_direction - transfer_dir, - int interval, int multi_count, - int hub_device_addr, - int hub_port) -{ - struct cvmx_usb_pipe *pipe; - - pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC); - if (!pipe) - return NULL; - if ((device_speed == CVMX_USB_SPEED_HIGH) && - (transfer_dir == CVMX_USB_DIRECTION_OUT) && - (transfer_type == CVMX_USB_TRANSFER_BULK)) - pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; - pipe->device_addr = device_addr; - pipe->endpoint_num = endpoint_num; - pipe->device_speed = device_speed; - pipe->max_packet = max_packet; - pipe->transfer_type = transfer_type; - pipe->transfer_dir = transfer_dir; - INIT_LIST_HEAD(&pipe->transactions); - - /* - * All pipes use interval to rate limit NAK processing. Force an - * interval if one wasn't supplied - */ - if (!interval) - interval = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - pipe->interval = interval * 8; - /* Force start splits to be schedule on uFrame 0 */ - pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) + - pipe->interval; - } else { - pipe->interval = interval; - pipe->next_tx_frame = usb->frame_number + pipe->interval; - } - pipe->multi_count = multi_count; - pipe->hub_device_addr = hub_device_addr; - pipe->hub_port = hub_port; - pipe->pid_toggle = 0; - pipe->split_sc_frame = -1; - list_add_tail(&pipe->node, &usb->idle_pipes); - - /* - * We don't need to tell the hardware about this pipe yet since - * it doesn't have any submitted requests - */ - - return pipe; -} - -/** - * Poll the RX FIFOs and remove data as needed. This function is only used - * in non DMA mode. It is very important that this function be called quickly - * enough to prevent FIFO overflow. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - */ -static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb) -{ - union cvmx_usbcx_grxstsph rx_status; - int channel; - int bytes; - u64 address; - u32 *ptr; - - rx_status.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GRXSTSPH(usb->index)); - /* Only read data if IN data is there */ - if (rx_status.s.pktsts != 2) - return; - /* Check if no data is available */ - if (!rx_status.s.bcnt) - return; - - channel = rx_status.s.chnum; - bytes = rx_status.s.bcnt; - if (!bytes) - return; - - /* Get where the DMA engine would have written this data */ - address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + - channel * 8); - - ptr = cvmx_phys_to_ptr(address); - cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8, - address + bytes); - - /* Loop writing the FIFO data for this packet into memory */ - while (bytes > 0) { - *ptr++ = cvmx_usb_read_csr32(usb, - USB_FIFO_ADDRESS(channel, usb->index)); - bytes -= 4; - } - CVMX_SYNCW; -} - -/** - * Fill the TX hardware fifo with data out of the software - * fifos - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @fifo: Software fifo to use - * @available: Amount of space in the hardware fifo - * - * Returns: Non zero if the hardware fifo was too small and needs - * to be serviced again. 
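Aside, not from the removed file: cvmx_usb_open_pipe() above seeds the scheduler so that full/low-speed pipes behind a high-speed hub (the split-transaction case) run at an 8-microframe granularity and send their first start-split on a uFrame-0 boundary, while all other pipes simply start one interval from now. Restated as a small helper; first_tx_frame() is an illustrative name:

static unsigned long long first_tx_frame(unsigned long long frame_number,
                                         int interval, int needs_split)
{
        if (!interval)
                interval = 1;   /* every pipe gets an interval, to rate-limit NAK handling */

        if (needs_split)        /* scale to microframes, align to the next uFrame 0 */
                return ((frame_number + 7) & ~7ULL) +
                       (unsigned long long)interval * 8;

        return frame_number + interval;
}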
- */ -static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb, - struct cvmx_usb_tx_fifo *fifo, int available) -{ - /* - * We're done either when there isn't anymore space or the software FIFO - * is empty - */ - while (available && (fifo->head != fifo->tail)) { - int i = fifo->tail; - const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); - u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, - usb->index) ^ 4; - int words = available; - - /* Limit the amount of data to what the SW fifo has */ - if (fifo->entry[i].size <= available) { - words = fifo->entry[i].size; - fifo->tail++; - if (fifo->tail > MAX_CHANNELS) - fifo->tail = 0; - } - - /* Update the next locations and counts */ - available -= words; - fifo->entry[i].address += words * 4; - fifo->entry[i].size -= words; - - /* - * Write the HW fifo data. The read every three writes is due - * to an errata on CN3XXX chips - */ - while (words > 3) { - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); - words -= 3; - } - cvmx_write64_uint32(csr_address, *ptr++); - if (--words) { - cvmx_write64_uint32(csr_address, *ptr++); - if (--words) - cvmx_write64_uint32(csr_address, *ptr++); - } - cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); - } - return fifo->head != fifo->tail; -} - -/** - * Check the hardware FIFOs and fill them as needed - * - * @usb: USB device state populated by cvmx_usb_initialize(). - */ -static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb) -{ - if (usb->periodic.head != usb->periodic.tail) { - union cvmx_usbcx_hptxsts tx_status; - - tx_status.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HPTXSTS(usb->index)); - if (cvmx_usb_fill_tx_hw(usb, &usb->periodic, - tx_status.s.ptxfspcavail)) - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, ptxfempmsk, 1); - else - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, ptxfempmsk, 0); - } - - if (usb->nonperiodic.head != usb->nonperiodic.tail) { - union cvmx_usbcx_gnptxsts tx_status; - - tx_status.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GNPTXSTS(usb->index)); - if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, - tx_status.s.nptxfspcavail)) - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, nptxfempmsk, 1); - else - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, nptxfempmsk, 0); - } -} - -/** - * Fill the TX FIFO with an outgoing packet - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @channel: Channel number to get packet from - */ -static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel) -{ - union cvmx_usbcx_hccharx hcchar; - union cvmx_usbcx_hcspltx usbc_hcsplt; - union cvmx_usbcx_hctsizx usbc_hctsiz; - struct cvmx_usb_tx_fifo *fifo; - - /* We only need to fill data on outbound channels */ - hcchar.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCCHARX(channel, usb->index)); - if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT) - return; - - /* OUT Splits only have data on the start and not the complete */ - usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCSPLTX(channel, usb->index)); - if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt) - return; - - /* - * Find out how many bytes we need to fill and convert it into 32bit - * words. 
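Aside, not from the removed file: the inner loop of cvmx_usb_fill_tx_hw() above pushes 32-bit words to a single FIFO CSR address and, per the original comment, issues a dummy CSR read after every third write to work around a CN3XXX errata. A restatement with the hardware accessors abstracted as callbacks; push_fifo_words() and both callbacks are illustrative, not driver API, and the caller must pass at least one word:

#include <stdint.h>

static void push_fifo_words(void (*fifo_write32)(uint32_t),
                            void (*dummy_read)(void),
                            const uint32_t *ptr, int words)
{
        while (words > 3) {
                fifo_write32(*ptr++);
                fifo_write32(*ptr++);
                fifo_write32(*ptr++);
                dummy_read();           /* CN3XXX errata: read back after three writes */
                words -= 3;
        }
        fifo_write32(*ptr++);           /* 1 to 3 words remain at this point */
        if (--words) {
                fifo_write32(*ptr++);
                if (--words)
                        fifo_write32(*ptr++);
        }
        dummy_read();
}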
- */ - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, usb->index)); - if (!usbc_hctsiz.s.xfersize) - return; - - if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) || - (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS)) - fifo = &usb->periodic; - else - fifo = &usb->nonperiodic; - - fifo->entry[fifo->head].channel = channel; - fifo->entry[fifo->head].address = - cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + - channel * 8); - fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2; - fifo->head++; - if (fifo->head > MAX_CHANNELS) - fifo->head = 0; - - cvmx_usb_poll_tx_fifo(usb); -} - -/** - * Perform channel specific setup for Control transactions. All - * the generic stuff will already have been done in cvmx_usb_start_channel(). - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @channel: Channel to setup - * @pipe: Pipe for control transaction - */ -static void cvmx_usb_start_channel_control(struct octeon_hcd *usb, - int channel, - struct cvmx_usb_pipe *pipe) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - struct cvmx_usb_transaction *transaction = - list_first_entry(&pipe->transactions, typeof(*transaction), - node); - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - int bytes_to_transfer = transaction->buffer_length - - transaction->actual_bytes; - int packets_to_transfer; - union cvmx_usbcx_hctsizx usbc_hctsiz; - - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, usb->index)); - - switch (transaction->stage) { - case CVMX_USB_STAGE_NON_CONTROL: - case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: - dev_err(dev, "%s: ERROR - Non control stage\n", __func__); - break; - case CVMX_USB_STAGE_SETUP: - usbc_hctsiz.s.pid = 3; /* Setup */ - bytes_to_transfer = sizeof(*header); - /* All Control operations start with a setup going OUT */ - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - CVMX_USB_DIRECTION_OUT); - /* - * Setup send the control header instead of the buffer data. The - * buffer data will be used in the next stage - */ - cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + - channel * 8, - transaction->control_header); - break; - case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: - usbc_hctsiz.s.pid = 3; /* Setup */ - bytes_to_transfer = 0; - /* All Control operations start with a setup going OUT */ - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - CVMX_USB_DIRECTION_OUT); - - USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - case CVMX_USB_STAGE_DATA: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (header->bRequestType & USB_DIR_IN) - bytes_to_transfer = 0; - else if (bytes_to_transfer > pipe->max_packet) - bytes_to_transfer = pipe->max_packet; - } - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->bRequestType & USB_DIR_IN) ? - CVMX_USB_DIRECTION_IN : - CVMX_USB_DIRECTION_OUT)); - break; - case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - if (!(header->bRequestType & USB_DIR_IN)) - bytes_to_transfer = 0; - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->bRequestType & USB_DIR_IN) ? 
- CVMX_USB_DIRECTION_IN : - CVMX_USB_DIRECTION_OUT)); - USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - case CVMX_USB_STAGE_STATUS: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - bytes_to_transfer = 0; - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->bRequestType & USB_DIR_IN) ? - CVMX_USB_DIRECTION_OUT : - CVMX_USB_DIRECTION_IN)); - break; - case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - bytes_to_transfer = 0; - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->bRequestType & USB_DIR_IN) ? - CVMX_USB_DIRECTION_OUT : - CVMX_USB_DIRECTION_IN)); - USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - } - - /* - * Make sure the transfer never exceeds the byte limit of the hardware. - * Further bytes will be sent as continued transactions - */ - if (bytes_to_transfer > MAX_TRANSFER_BYTES) { - /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */ - bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet; - bytes_to_transfer *= pipe->max_packet; - } - - /* - * Calculate the number of packets to transfer. If the length is zero - * we still need to transfer one packet - */ - packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer, - pipe->max_packet); - if (packets_to_transfer == 0) { - packets_to_transfer = 1; - } else if ((packets_to_transfer > 1) && - (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { - /* - * Limit to one packet when not using DMA. Channels must be - * restarted between every packet for IN transactions, so there - * is no reason to do multiple packets in a row - */ - packets_to_transfer = 1; - bytes_to_transfer = packets_to_transfer * pipe->max_packet; - } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) { - /* - * Limit the number of packet and data transferred to what the - * hardware can handle - */ - packets_to_transfer = MAX_TRANSFER_PACKETS; - bytes_to_transfer = packets_to_transfer * pipe->max_packet; - } - - usbc_hctsiz.s.xfersize = bytes_to_transfer; - usbc_hctsiz.s.pktcnt = packets_to_transfer; - - cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), - usbc_hctsiz.u32); -} - -/** - * Start a channel to perform the pipe's head transaction - * - * @usb: USB device state populated by cvmx_usb_initialize(). 
- * @channel: Channel to setup - * @pipe: Pipe to start - */ -static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel, - struct cvmx_usb_pipe *pipe) -{ - struct cvmx_usb_transaction *transaction = - list_first_entry(&pipe->transactions, typeof(*transaction), - node); - - /* Make sure all writes to the DMA region get flushed */ - CVMX_SYNCW; - - /* Attach the channel to the pipe */ - usb->pipe_for_channel[channel] = pipe; - pipe->channel = channel; - pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED; - - /* Mark this channel as in use */ - usb->idle_hardware_channels &= ~(1 << channel); - - /* Enable the channel interrupt bits */ - { - union cvmx_usbcx_hcintx usbc_hcint; - union cvmx_usbcx_hcintmskx usbc_hcintmsk; - union cvmx_usbcx_haintmsk usbc_haintmsk; - - /* Clear all channel status bits */ - usbc_hcint.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCINTX(channel, usb->index)); - - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTX(channel, usb->index), - usbc_hcint.u32); - - usbc_hcintmsk.u32 = 0; - usbc_hcintmsk.s.chhltdmsk = 1; - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { - /* - * Channels need these extra interrupts when we aren't - * in DMA mode. - */ - usbc_hcintmsk.s.datatglerrmsk = 1; - usbc_hcintmsk.s.frmovrunmsk = 1; - usbc_hcintmsk.s.bblerrmsk = 1; - usbc_hcintmsk.s.xacterrmsk = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * Splits don't generate xfercompl, so we need - * ACK and NYET. - */ - usbc_hcintmsk.s.nyetmsk = 1; - usbc_hcintmsk.s.ackmsk = 1; - } - usbc_hcintmsk.s.nakmsk = 1; - usbc_hcintmsk.s.stallmsk = 1; - usbc_hcintmsk.s.xfercomplmsk = 1; - } - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTMSKX(channel, usb->index), - usbc_hcintmsk.u32); - - /* Enable the channel interrupt to propagate */ - usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HAINTMSK(usb->index)); - usbc_haintmsk.s.haintmsk |= 1 << channel; - cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), - usbc_haintmsk.u32); - } - - /* Setup the location the DMA engine uses. */ - { - u64 reg; - u64 dma_address = transaction->buffer + - transaction->actual_bytes; - - if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) - dma_address = transaction->buffer + - transaction->iso_packets[0].offset + - transaction->actual_bytes; - - if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) - reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index); - else - reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index); - cvmx_write64_uint64(reg + channel * 8, dma_address); - } - - /* Setup both the size of the transfer and the SPLIT characteristics */ - { - union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0}; - union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0}; - int packets_to_transfer; - int bytes_to_transfer = transaction->buffer_length - - transaction->actual_bytes; - - /* - * ISOCHRONOUS transactions store each individual transfer size - * in the packet structure, not the global buffer_length - */ - if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) - bytes_to_transfer = - transaction->iso_packets[0].length - - transaction->actual_bytes; - - /* - * We need to do split transactions when we are talking to non - * high speed devices that are behind a high speed hub - */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * On the start split phase (stage is even) record the - * frame number we will need to send the split complete. 
- * We only store the lower two bits since the time ahead - * can only be two frames - */ - if ((transaction->stage & 1) == 0) { - if (transaction->type == CVMX_USB_TRANSFER_BULK) - pipe->split_sc_frame = - (usb->frame_number + 1) & 0x7f; - else - pipe->split_sc_frame = - (usb->frame_number + 2) & 0x7f; - } else { - pipe->split_sc_frame = -1; - } - - usbc_hcsplt.s.spltena = 1; - usbc_hcsplt.s.hubaddr = pipe->hub_device_addr; - usbc_hcsplt.s.prtaddr = pipe->hub_port; - usbc_hcsplt.s.compsplt = (transaction->stage == - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE); - - /* - * SPLIT transactions can only ever transmit one data - * packet so limit the transfer size to the max packet - * size - */ - if (bytes_to_transfer > pipe->max_packet) - bytes_to_transfer = pipe->max_packet; - - /* - * ISOCHRONOUS OUT splits are unique in that they limit - * data transfers to 188 byte chunks representing the - * begin/middle/end of the data or all - */ - if (!usbc_hcsplt.s.compsplt && - (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && - (pipe->transfer_type == - CVMX_USB_TRANSFER_ISOCHRONOUS)) { - /* - * Clear the split complete frame number as - * there isn't going to be a split complete - */ - pipe->split_sc_frame = -1; - /* - * See if we've started this transfer and sent - * data - */ - if (transaction->actual_bytes == 0) { - /* - * Nothing sent yet, this is either a - * begin or the entire payload - */ - if (bytes_to_transfer <= 188) - /* Entire payload in one go */ - usbc_hcsplt.s.xactpos = 3; - else - /* First part of payload */ - usbc_hcsplt.s.xactpos = 2; - } else { - /* - * Continuing the previous data, we must - * either be in the middle or at the end - */ - if (bytes_to_transfer <= 188) - /* End of payload */ - usbc_hcsplt.s.xactpos = 1; - else - /* Middle of payload */ - usbc_hcsplt.s.xactpos = 0; - } - /* - * Again, the transfer size is limited to 188 - * bytes - */ - if (bytes_to_transfer > 188) - bytes_to_transfer = 188; - } - } - - /* - * Make sure the transfer never exceeds the byte limit of the - * hardware. Further bytes will be sent as continued - * transactions - */ - if (bytes_to_transfer > MAX_TRANSFER_BYTES) { - /* - * Round MAX_TRANSFER_BYTES to a multiple of out packet - * size - */ - bytes_to_transfer = MAX_TRANSFER_BYTES / - pipe->max_packet; - bytes_to_transfer *= pipe->max_packet; - } - - /* - * Calculate the number of packets to transfer. If the length is - * zero we still need to transfer one packet - */ - packets_to_transfer = - DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet); - if (packets_to_transfer == 0) { - packets_to_transfer = 1; - } else if ((packets_to_transfer > 1) && - (usb->init_flags & - CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { - /* - * Limit to one packet when not using DMA. 
Channels must - * be restarted between every packet for IN - * transactions, so there is no reason to do multiple - * packets in a row - */ - packets_to_transfer = 1; - bytes_to_transfer = packets_to_transfer * - pipe->max_packet; - } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) { - /* - * Limit the number of packet and data transferred to - * what the hardware can handle - */ - packets_to_transfer = MAX_TRANSFER_PACKETS; - bytes_to_transfer = packets_to_transfer * - pipe->max_packet; - } - - usbc_hctsiz.s.xfersize = bytes_to_transfer; - usbc_hctsiz.s.pktcnt = packets_to_transfer; - - /* Update the DATA0/DATA1 toggle */ - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - /* - * High speed pipes may need a hardware ping before they start - */ - if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING) - usbc_hctsiz.s.dopng = 1; - - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCSPLTX(channel, usb->index), - usbc_hcsplt.u32); - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, usb->index), - usbc_hctsiz.u32); - } - - /* Setup the Host Channel Characteristics Register */ - { - union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0}; - - /* - * Set the startframe odd/even properly. This is only used for - * periodic - */ - usbc_hcchar.s.oddfrm = usb->frame_number & 1; - - /* - * Set the number of back to back packets allowed by this - * endpoint. Split transactions interpret "ec" as the number of - * immediate retries of failure. These retries happen too - * quickly, so we disable these entirely for splits - */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) - usbc_hcchar.s.ec = 1; - else if (pipe->multi_count < 1) - usbc_hcchar.s.ec = 1; - else if (pipe->multi_count > 3) - usbc_hcchar.s.ec = 3; - else - usbc_hcchar.s.ec = pipe->multi_count; - - /* Set the rest of the endpoint specific settings */ - usbc_hcchar.s.devaddr = pipe->device_addr; - usbc_hcchar.s.eptype = transaction->type; - usbc_hcchar.s.lspddev = - (pipe->device_speed == CVMX_USB_SPEED_LOW); - usbc_hcchar.s.epdir = pipe->transfer_dir; - usbc_hcchar.s.epnum = pipe->endpoint_num; - usbc_hcchar.s.mps = pipe->max_packet; - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(channel, usb->index), - usbc_hcchar.u32); - } - - /* Do transaction type specific fixups as needed */ - switch (transaction->type) { - case CVMX_USB_TRANSFER_CONTROL: - cvmx_usb_start_channel_control(usb, channel, pipe); - break; - case CVMX_USB_TRANSFER_BULK: - case CVMX_USB_TRANSFER_INTERRUPT: - break; - case CVMX_USB_TRANSFER_ISOCHRONOUS: - if (!cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * ISO transactions require different PIDs depending on - * direction and how many packets are needed - */ - if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { - if (pipe->multi_count < 2) /* Need DATA0 */ - USB_SET_FIELD32( - CVMX_USBCX_HCTSIZX(channel, - usb->index), - cvmx_usbcx_hctsizx, pid, 0); - else /* Need MDATA */ - USB_SET_FIELD32( - CVMX_USBCX_HCTSIZX(channel, - usb->index), - cvmx_usbcx_hctsizx, pid, 3); - } - } - break; - } - { - union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 = - cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, - usb->index)) - }; - transaction->xfersize = usbc_hctsiz.s.xfersize; - transaction->pktcnt = usbc_hctsiz.s.pktcnt; - } - /* Remember when we start a split transaction */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) - usb->active_split = transaction; - USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), - cvmx_usbcx_hccharx, chena, 1); - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) - cvmx_usb_fill_tx_fifo(usb, channel); -} - -/** - 
* Find a pipe that is ready to be scheduled to hardware. - * @usb: USB device state populated by cvmx_usb_initialize(). - * @xfer_type: Transfer type - * - * Returns: Pipe or NULL if none are ready - */ -static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb, - enum cvmx_usb_transfer xfer_type) -{ - struct list_head *list = usb->active_pipes + xfer_type; - u64 current_frame = usb->frame_number; - struct cvmx_usb_pipe *pipe; - - list_for_each_entry(pipe, list, node) { - struct cvmx_usb_transaction *t = - list_first_entry(&pipe->transactions, typeof(*t), - node); - if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t && - (pipe->next_tx_frame <= current_frame) && - ((pipe->split_sc_frame == -1) || - ((((int)current_frame - pipe->split_sc_frame) & 0x7f) < - 0x40)) && - (!usb->active_split || (usb->active_split == t))) { - prefetch(t); - return pipe; - } - } - return NULL; -} - -static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb, - int is_sof) -{ - struct cvmx_usb_pipe *pipe; - - /* Find a pipe needing service. */ - if (is_sof) { - /* - * Only process periodic pipes on SOF interrupts. This way we - * are sure that the periodic data is sent in the beginning of - * the frame. - */ - pipe = cvmx_usb_find_ready_pipe(usb, - CVMX_USB_TRANSFER_ISOCHRONOUS); - if (pipe) - return pipe; - pipe = cvmx_usb_find_ready_pipe(usb, - CVMX_USB_TRANSFER_INTERRUPT); - if (pipe) - return pipe; - } - pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL); - if (pipe) - return pipe; - return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK); -} - -/** - * Called whenever a pipe might need to be scheduled to the - * hardware. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @is_sof: True if this schedule was called on a SOF interrupt. - */ -static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof) -{ - int channel; - struct cvmx_usb_pipe *pipe; - int need_sof; - enum cvmx_usb_transfer ttype; - - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { - /* - * Without DMA we need to be careful to not schedule something - * at the end of a frame and cause an overrun. 
- */ - union cvmx_usbcx_hfnum hfnum = { - .u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HFNUM(usb->index)) - }; - - union cvmx_usbcx_hfir hfir = { - .u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HFIR(usb->index)) - }; - - if (hfnum.s.frrem < hfir.s.frint / 4) - goto done; - } - - while (usb->idle_hardware_channels) { - /* Find an idle channel */ - channel = __fls(usb->idle_hardware_channels); - if (unlikely(channel > 7)) - break; - - pipe = cvmx_usb_next_pipe(usb, is_sof); - if (!pipe) - break; - - cvmx_usb_start_channel(usb, channel, pipe); - } - -done: - /* - * Only enable SOF interrupts when we have transactions pending in the - * future that might need to be scheduled - */ - need_sof = 0; - for (ttype = CVMX_USB_TRANSFER_CONTROL; - ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) { - list_for_each_entry(pipe, &usb->active_pipes[ttype], node) { - if (pipe->next_tx_frame > usb->frame_number) { - need_sof = 1; - break; - } - } - } - USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), - cvmx_usbcx_gintmsk, sofmsk, need_sof); -} - -static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb, - enum cvmx_usb_status status, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction - *transaction, - int bytes_transferred, - struct urb *urb) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - - if (likely(status == CVMX_USB_STATUS_OK)) - urb->actual_length = bytes_transferred; - else - urb->actual_length = 0; - - urb->hcpriv = NULL; - - /* For Isochronous transactions we need to update the URB packet status - * list from data in our private copy - */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - int i; - /* - * The pointer to the private list is stored in the setup_packet - * field. - */ - struct cvmx_usb_iso_packet *iso_packet = - (struct cvmx_usb_iso_packet *)urb->setup_packet; - /* Recalculate the transfer size by adding up each packet */ - urb->actual_length = 0; - for (i = 0; i < urb->number_of_packets; i++) { - if (iso_packet[i].status == CVMX_USB_STATUS_OK) { - urb->iso_frame_desc[i].status = 0; - urb->iso_frame_desc[i].actual_length = - iso_packet[i].length; - urb->actual_length += - urb->iso_frame_desc[i].actual_length; - } else { - dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n", - i, urb->number_of_packets, - iso_packet[i].status, pipe, - transaction, iso_packet[i].length); - urb->iso_frame_desc[i].status = -EREMOTEIO; - } - } - /* Free the private list now that we don't need it anymore */ - kfree(iso_packet); - urb->setup_packet = NULL; - } - - switch (status) { - case CVMX_USB_STATUS_OK: - urb->status = 0; - break; - case CVMX_USB_STATUS_CANCEL: - if (urb->status == 0) - urb->status = -ENOENT; - break; - case CVMX_USB_STATUS_STALL: - dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n", - pipe, transaction, bytes_transferred); - urb->status = -EPIPE; - break; - case CVMX_USB_STATUS_BABBLEERR: - dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n", - pipe, transaction, bytes_transferred); - urb->status = -EPIPE; - break; - case CVMX_USB_STATUS_SHORT: - dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n", - pipe, transaction, bytes_transferred); - urb->status = -EREMOTEIO; - break; - case CVMX_USB_STATUS_ERROR: - case CVMX_USB_STATUS_XACTERR: - case CVMX_USB_STATUS_DATATGLERR: - case CVMX_USB_STATUS_FRAMEERR: - dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n", - status, pipe, transaction, bytes_transferred); - urb->status = -EPROTO; - break; - } - 
usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb); - spin_unlock(&usb->lock); - usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status); - spin_lock(&usb->lock); -} - -/** - * Signal the completion of a transaction and free it. The - * transaction will be removed from the pipe transaction list. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Pipe the transaction is on - * @transaction: - * Transaction that completed - * @complete_code: - * Completion code - */ -static void cvmx_usb_complete(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - enum cvmx_usb_status complete_code) -{ - /* If this was a split then clear our split in progress marker */ - if (usb->active_split == transaction) - usb->active_split = NULL; - - /* - * Isochronous transactions need extra processing as they might not be - * done after a single data transfer - */ - if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) { - /* Update the number of bytes transferred in this ISO packet */ - transaction->iso_packets[0].length = transaction->actual_bytes; - transaction->iso_packets[0].status = complete_code; - - /* - * If there are more ISOs pending and we succeeded, schedule the - * next one - */ - if ((transaction->iso_number_packets > 1) && - (complete_code == CVMX_USB_STATUS_OK)) { - /* No bytes transferred for this packet as of yet */ - transaction->actual_bytes = 0; - /* One less ISO waiting to transfer */ - transaction->iso_number_packets--; - /* Increment to the next location in our packet array */ - transaction->iso_packets++; - transaction->stage = CVMX_USB_STAGE_NON_CONTROL; - return; - } - } - - /* Remove the transaction from the pipe list */ - list_del(&transaction->node); - if (list_empty(&pipe->transactions)) - list_move_tail(&pipe->node, &usb->idle_pipes); - octeon_usb_urb_complete_callback(usb, complete_code, pipe, - transaction, - transaction->actual_bytes, - transaction->urb); - kfree(transaction); -} - -/** - * Submit a usb transaction to a pipe. Called for all types - * of transactions. - * - * @usb: - * @pipe: Which pipe to submit to. - * @type: Transaction type - * @buffer: User buffer for the transaction - * @buffer_length: - * User buffer's length in bytes - * @control_header: - * For control transactions, the 8 byte standard header - * @iso_start_frame: - * For ISO transactions, the start frame - * @iso_number_packets: - * For ISO, the number of packet in the transaction. - * @iso_packets: - * A description of each ISO packet - * @urb: URB for the callback - * - * Returns: Transaction or NULL on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - enum cvmx_usb_transfer type, - u64 buffer, - int buffer_length, - u64 control_header, - int iso_start_frame, - int iso_number_packets, - struct cvmx_usb_iso_packet *iso_packets, - struct urb *urb) -{ - struct cvmx_usb_transaction *transaction; - - if (unlikely(pipe->transfer_type != type)) - return NULL; - - transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC); - if (unlikely(!transaction)) - return NULL; - - transaction->type = type; - transaction->buffer = buffer; - transaction->buffer_length = buffer_length; - transaction->control_header = control_header; - /* FIXME: This is not used, implement it. 
*/ - transaction->iso_start_frame = iso_start_frame; - transaction->iso_number_packets = iso_number_packets; - transaction->iso_packets = iso_packets; - transaction->urb = urb; - if (transaction->type == CVMX_USB_TRANSFER_CONTROL) - transaction->stage = CVMX_USB_STAGE_SETUP; - else - transaction->stage = CVMX_USB_STAGE_NON_CONTROL; - - if (!list_empty(&pipe->transactions)) { - list_add_tail(&transaction->node, &pipe->transactions); - } else { - list_add_tail(&transaction->node, &pipe->transactions); - list_move_tail(&pipe->node, - &usb->active_pipes[pipe->transfer_type]); - - /* - * We may need to schedule the pipe if this was the head of the - * pipe. - */ - cvmx_usb_schedule(usb, 0); - } - - return transaction; -} - -/** - * Call to submit a USB Bulk transfer to a pipe. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Handle to the pipe for the transfer. - * @urb: URB. - * - * Returns: A submitted transaction or NULL on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - NULL, /* iso_packets */ - urb); -} - -/** - * Call to submit a USB Interrupt transfer to a pipe. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Handle to the pipe for the transfer. - * @urb: URB returned when the callback is called. - * - * Returns: A submitted transaction or NULL on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - return cvmx_usb_submit_transaction(usb, pipe, - CVMX_USB_TRANSFER_INTERRUPT, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - NULL, /* iso_packets */ - urb); -} - -/** - * Call to submit a USB Control transfer to a pipe. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Handle to the pipe for the transfer. - * @urb: URB. - * - * Returns: A submitted transaction or NULL on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_control( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - int buffer_length = urb->transfer_buffer_length; - u64 control_header = urb->setup_dma; - struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header); - - if ((header->bRequestType & USB_DIR_IN) == 0) - buffer_length = le16_to_cpu(header->wLength); - - return cvmx_usb_submit_transaction(usb, pipe, - CVMX_USB_TRANSFER_CONTROL, - urb->transfer_dma, buffer_length, - control_header, - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - NULL, /* iso_packets */ - urb); -} - -/** - * Call to submit a USB Isochronous transfer to a pipe. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Handle to the pipe for the transfer. - * @urb: URB returned when the callback is called. - * - * Returns: A submitted transaction or NULL on failure. 
- */ -static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - struct cvmx_usb_iso_packet *packets; - - packets = (struct cvmx_usb_iso_packet *)urb->setup_packet; - return cvmx_usb_submit_transaction(usb, pipe, - CVMX_USB_TRANSFER_ISOCHRONOUS, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - urb->start_frame, - urb->number_of_packets, - packets, urb); -} - -/** - * Cancel one outstanding request in a pipe. Canceling a request - * can fail if the transaction has already completed before cancel - * is called. Even after a successful cancel call, it may take - * a frame or two for the cvmx_usb_poll() function to call the - * associated callback. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Pipe to cancel requests in. - * @transaction: Transaction to cancel, returned by the submit function. - * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_cancel(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction) -{ - /* - * If the transaction is the HEAD of the queue and scheduled. We need to - * treat it special - */ - if (list_first_entry(&pipe->transactions, typeof(*transaction), node) == - transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) { - union cvmx_usbcx_hccharx usbc_hcchar; - - usb->pipe_for_channel[pipe->channel] = NULL; - pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED; - - CVMX_SYNCW; - - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCCHARX(pipe->channel, - usb->index)); - /* - * If the channel isn't enabled then the transaction already - * completed. - */ - if (usbc_hcchar.s.chena) { - usbc_hcchar.s.chdis = 1; - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(pipe->channel, - usb->index), - usbc_hcchar.u32); - } - } - cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL); - return 0; -} - -/** - * Cancel all outstanding requests in a pipe. Logically all this - * does is call cvmx_usb_cancel() in a loop. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Pipe to cancel requests in. - * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_cancel_all(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - struct cvmx_usb_transaction *transaction, *next; - - /* Simply loop through and attempt to cancel each transaction */ - list_for_each_entry_safe(transaction, next, &pipe->transactions, node) { - int result = cvmx_usb_cancel(usb, pipe, transaction); - - if (unlikely(result != 0)) - return result; - } - return 0; -} - -/** - * Close a pipe created with cvmx_usb_open_pipe(). - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * @pipe: Pipe to close. - * - * Returns: 0 or a negative error code. EBUSY is returned if the pipe has - * outstanding transfers. - */ -static int cvmx_usb_close_pipe(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - /* Fail if the pipe has pending transactions */ - if (!list_empty(&pipe->transactions)) - return -EBUSY; - - list_del(&pipe->node); - kfree(pipe); - - return 0; -} - -/** - * Get the current USB protocol level frame number. The frame - * number is always in the range of 0-0x7ff. - * - * @usb: USB device state populated by cvmx_usb_initialize(). 
- * - * Returns: USB frame number - */ -static int cvmx_usb_get_frame_number(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hfnum usbc_hfnum; - - usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); - - return usbc_hfnum.s.frnum; -} - -static void cvmx_usb_transfer_control(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - union cvmx_usbcx_hccharx usbc_hcchar, - int buffer_space_left, - int bytes_in_last_packet) -{ - switch (transaction->stage) { - case CVMX_USB_STAGE_NON_CONTROL: - case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: - /* This should be impossible */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_ERROR); - break; - case CVMX_USB_STAGE_SETUP: - pipe->pid_toggle = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->stage = - CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE; - } else { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wLength) - transaction->stage = CVMX_USB_STAGE_DATA; - else - transaction->stage = CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: - { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wLength) - transaction->stage = CVMX_USB_STAGE_DATA; - else - transaction->stage = CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_DATA: - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE; - /* - * For setup OUT data that are splits, - * the hardware doesn't appear to count - * transferred data. Here we manually - * update the data transferred - */ - if (!usbc_hcchar.s.epdir) { - if (buffer_space_left < pipe->max_packet) - transaction->actual_bytes += - buffer_space_left; - else - transaction->actual_bytes += - pipe->max_packet; - } - } else if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = CVMX_USB_STAGE_STATUS; - } else { - transaction->stage = CVMX_USB_STAGE_DATA; - } - break; - case CVMX_USB_STAGE_STATUS: - if (cvmx_usb_pipe_needs_split(usb, pipe)) - transaction->stage = - CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE; - else - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - break; - case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: - cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); - break; - } -} - -static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - union cvmx_usbcx_hcintx usbc_hcint, - int buffer_space_left, - int bytes_in_last_packet) -{ - /* - * The only time a bulk transfer isn't complete when it finishes with - * an ACK is during a split transaction. For splits we need to continue - * the transfer if more data is needed. 
- */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; - else if (buffer_space_left && - (bytes_in_last_packet == pipe->max_packet)) - transaction->stage = CVMX_USB_STAGE_NON_CONTROL; - else - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - } else { - if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && - (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && - (usbc_hcint.s.nak)) - pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; - if (!buffer_space_left || - (bytes_in_last_packet < pipe->max_packet)) - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - } -} - -static void cvmx_usb_transfer_intr(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - int buffer_space_left, - int bytes_in_last_packet) -{ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) { - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; - } else if (buffer_space_left && - (bytes_in_last_packet == pipe->max_packet)) { - transaction->stage = CVMX_USB_STAGE_NON_CONTROL; - } else { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - } - } else if (!buffer_space_left || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); - } -} - -static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - int buffer_space_left, - int bytes_in_last_packet, - int bytes_this_transfer) -{ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * ISOCHRONOUS OUT splits don't require a complete split stage. - * Instead they use a sequence of begin OUT splits to transfer - * the data 188 bytes at a time. Once the transfer is complete, - * the pipe sleeps until the next schedule interval. - */ - if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { - /* - * If no space left or this wasn't a max size packet - * then this transfer is complete. Otherwise start it - * again to send the next 188 bytes - */ - if (!buffer_space_left || (bytes_this_transfer < 188)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - } - return; - } - if (transaction->stage == - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) { - /* - * We are in the incoming data phase. 
Keep getting data - * until we run out of space or get a small packet - */ - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_OK); - } - } else { - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; - } - } else { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); - } -} - -/** - * Poll a channel for status - * - * @usb: USB device - * @channel: Channel to poll - * - * Returns: Zero on success - */ -static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - union cvmx_usbcx_hcintx usbc_hcint; - union cvmx_usbcx_hctsizx usbc_hctsiz; - union cvmx_usbcx_hccharx usbc_hcchar; - struct cvmx_usb_pipe *pipe; - struct cvmx_usb_transaction *transaction; - int bytes_this_transfer; - int bytes_in_last_packet; - int packets_processed; - int buffer_space_left; - - /* Read the interrupt status bits for the channel */ - usbc_hcint.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCINTX(channel, usb->index)); - - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCCHARX(channel, - usb->index)); - - if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) { - /* - * There seems to be a bug in CN31XX which can cause - * interrupt IN transfers to get stuck until we do a - * write of HCCHARX without changing things - */ - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(channel, - usb->index), - usbc_hcchar.u32); - return 0; - } - - /* - * In non DMA mode the channels don't halt themselves. We need - * to manually disable channels that are left running - */ - if (!usbc_hcint.s.chhltd) { - if (usbc_hcchar.s.chena) { - union cvmx_usbcx_hcintmskx hcintmsk; - /* Disable all interrupts except CHHLTD */ - hcintmsk.u32 = 0; - hcintmsk.s.chhltdmsk = 1; - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTMSKX(channel, usb->index), - hcintmsk.u32); - usbc_hcchar.s.chdis = 1; - cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(channel, usb->index), - usbc_hcchar.u32); - return 0; - } else if (usbc_hcint.s.xfercompl) { - /* - * Successful IN/OUT with transfer complete. - * Channel halt isn't needed. - */ - } else { - dev_err(dev, "USB%d: Channel %d interrupt without halt\n", - usb->index, channel); - return 0; - } - } - } else { - /* - * There is are no interrupts that we need to process when the - * channel is still running - */ - if (!usbc_hcint.s.chhltd) - return 0; - } - - /* Disable the channel interrupts now that it is done */ - cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0); - usb->idle_hardware_channels |= (1 << channel); - - /* Make sure this channel is tied to a valid pipe */ - pipe = usb->pipe_for_channel[channel]; - prefetch(pipe); - if (!pipe) - return 0; - transaction = list_first_entry(&pipe->transactions, - typeof(*transaction), - node); - prefetch(transaction); - - /* - * Disconnect this pipe from the HW channel. 
Later the schedule - * function will figure out which pipe needs to go - */ - usb->pipe_for_channel[channel] = NULL; - pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED; - - /* - * Read the channel config info so we can figure out how much data - * transferred - */ - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCCHARX(channel, usb->index)); - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, usb->index)); - - /* - * Calculating the number of bytes successfully transferred is dependent - * on the transfer direction - */ - packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt; - if (usbc_hcchar.s.epdir) { - /* - * IN transactions are easy. For every byte received the - * hardware decrements xfersize. All we need to do is subtract - * the current value of xfersize from its starting value and we - * know how many bytes were written to the buffer - */ - bytes_this_transfer = transaction->xfersize - - usbc_hctsiz.s.xfersize; - } else { - /* - * OUT transaction don't decrement xfersize. Instead pktcnt is - * decremented on every successful packet send. The hardware - * does this when it receives an ACK, or NYET. If it doesn't - * receive one of these responses pktcnt doesn't change - */ - bytes_this_transfer = packets_processed * usbc_hcchar.s.mps; - /* - * The last packet may not be a full transfer if we didn't have - * enough data - */ - if (bytes_this_transfer > transaction->xfersize) - bytes_this_transfer = transaction->xfersize; - } - /* Figure out how many bytes were in the last packet of the transfer */ - if (packets_processed) - bytes_in_last_packet = bytes_this_transfer - - (packets_processed - 1) * usbc_hcchar.s.mps; - else - bytes_in_last_packet = bytes_this_transfer; - - /* - * As a special case, setup transactions output the setup header, not - * the user's data. For this reason we don't count setup data as bytes - * transferred - */ - if ((transaction->stage == CVMX_USB_STAGE_SETUP) || - (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE)) - bytes_this_transfer = 0; - - /* - * Add the bytes transferred to the running total. It is important that - * bytes_this_transfer doesn't count any data that needs to be - * retransmitted - */ - transaction->actual_bytes += bytes_this_transfer; - if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) - buffer_space_left = transaction->iso_packets[0].length - - transaction->actual_bytes; - else - buffer_space_left = transaction->buffer_length - - transaction->actual_bytes; - - /* - * We need to remember the PID toggle state for the next transaction. - * The hardware already updated it for the next transaction - */ - pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0); - - /* - * For high speed bulk out, assume the next transaction will need to do - * a ping before proceeding. If this isn't true the ACK processing below - * will clear this flag - */ - if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && - (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) && - (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)) - pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; - - if (WARN_ON_ONCE(bytes_this_transfer < 0)) { - /* - * In some rare cases the DMA engine seems to get stuck and - * keeps substracting same byte count over and over again. In - * such case we just need to fail every transaction. 
- */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_ERROR); - return 0; - } - - if (usbc_hcint.s.stall) { - /* - * STALL as a response means this transaction cannot be - * completed because the device can't process transactions. Tell - * the user. Any data that was transferred will be counted on - * the actual bytes transferred - */ - pipe->pid_toggle = 0; - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_STALL); - } else if (usbc_hcint.s.xacterr) { - /* - * XactErr as a response means the device signaled - * something wrong with the transfer. For example, PID - * toggle errors cause these. - */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_XACTERR); - } else if (usbc_hcint.s.bblerr) { - /* Babble Error (BblErr) */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_BABBLEERR); - } else if (usbc_hcint.s.datatglerr) { - /* Data toggle error */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_DATATGLERR); - } else if (usbc_hcint.s.nyet) { - /* - * NYET as a response is only allowed in three cases: as a - * response to a ping, as a response to a split transaction, and - * as a response to a bulk out. The ping case is handled by - * hardware, so we only have splits and bulk out - */ - if (!cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->retries = 0; - /* - * If there is more data to go then we need to try - * again. Otherwise this transaction is complete - */ - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) - cvmx_usb_complete(usb, pipe, - transaction, - CVMX_USB_STATUS_OK); - } else { - /* - * Split transactions retry the split complete 4 times - * then rewind to the start split and do the entire - * transactions again - */ - transaction->retries++; - if ((transaction->retries & 0x3) == 0) { - /* - * Rewind to the beginning of the transaction by - * anding off the split complete bit - */ - transaction->stage &= ~1; - pipe->split_sc_frame = -1; - } - } - } else if (usbc_hcint.s.ack) { - transaction->retries = 0; - /* - * The ACK bit can only be checked after the other error bits. - * This is because a multi packet transfer may succeed in a - * number of packets and then get a different response on the - * last packet. In this case both ACK and the last response bit - * will be set. If none of the other response bits is set, then - * the last packet must have been an ACK - * - * Since we got an ACK, we know we don't need to do a ping on - * this pipe - */ - pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING; - - switch (transaction->type) { - case CVMX_USB_TRANSFER_CONTROL: - cvmx_usb_transfer_control(usb, pipe, transaction, - usbc_hcchar, - buffer_space_left, - bytes_in_last_packet); - break; - case CVMX_USB_TRANSFER_BULK: - cvmx_usb_transfer_bulk(usb, pipe, transaction, - usbc_hcint, buffer_space_left, - bytes_in_last_packet); - break; - case CVMX_USB_TRANSFER_INTERRUPT: - cvmx_usb_transfer_intr(usb, pipe, transaction, - buffer_space_left, - bytes_in_last_packet); - break; - case CVMX_USB_TRANSFER_ISOCHRONOUS: - cvmx_usb_transfer_isoc(usb, pipe, transaction, - buffer_space_left, - bytes_in_last_packet, - bytes_this_transfer); - break; - } - } else if (usbc_hcint.s.nak) { - /* - * If this was a split then clear our split in progress marker. - */ - if (usb->active_split == transaction) - usb->active_split = NULL; - /* - * NAK as a response means the device couldn't accept the - * transaction, but it should be retried in the future. 
Rewind - * to the beginning of the transaction by anding off the split - * complete bit. Retry in the next interval - */ - transaction->retries = 0; - transaction->stage &= ~1; - pipe->next_tx_frame += pipe->interval; - if (pipe->next_tx_frame < usb->frame_number) - pipe->next_tx_frame = usb->frame_number + - pipe->interval - - (usb->frame_number - pipe->next_tx_frame) % - pipe->interval; - } else { - struct cvmx_usb_port_status port; - - port = cvmx_usb_get_status(usb); - if (port.port_enabled) { - /* We'll retry the exact same transaction again */ - transaction->retries++; - } else { - /* - * We get channel halted interrupts with no result bits - * sets when the cable is unplugged - */ - cvmx_usb_complete(usb, pipe, transaction, - CVMX_USB_STATUS_ERROR); - } - } - return 0; -} - -static void octeon_usb_port_callback(struct octeon_hcd *usb) -{ - spin_unlock(&usb->lock); - usb_hcd_poll_rh_status(octeon_to_hcd(usb)); - spin_lock(&usb->lock); -} - -/** - * Poll the USB block for status and call all needed callback - * handlers. This function is meant to be called in the interrupt - * handler for the USB controller. It can also be called - * periodically in a loop for non-interrupt based operation. - * - * @usb: USB device state populated by cvmx_usb_initialize(). - * - * Returns: 0 or a negative error code. - */ -static int cvmx_usb_poll(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hfnum usbc_hfnum; - union cvmx_usbcx_gintsts usbc_gintsts; - - prefetch_range(usb, sizeof(*usb)); - - /* Update the frame counter */ - usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); - if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum) - usb->frame_number += 0x4000; - usb->frame_number &= ~0x3fffull; - usb->frame_number |= usbc_hfnum.s.frnum; - - /* Read the pending interrupts */ - usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_GINTSTS(usb->index)); - - /* Clear the interrupts now that we know about them */ - cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), - usbc_gintsts.u32); - - if (usbc_gintsts.s.rxflvl) { - /* - * RxFIFO Non-Empty (RxFLvl) - * Indicates that there is at least one packet pending to be - * read from the RxFIFO. - * - * In DMA mode this is handled by hardware - */ - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) - cvmx_usb_poll_rx_fifo(usb); - } - if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) { - /* Fill the Tx FIFOs when not in DMA mode */ - if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) - cvmx_usb_poll_tx_fifo(usb); - } - if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) { - union cvmx_usbcx_hprt usbc_hprt; - /* - * Disconnect Detected Interrupt (DisconnInt) - * Asserted when a device disconnect is detected. - * - * Host Port Interrupt (PrtInt) - * The core sets this bit to indicate a change in port status of - * one of the O2P USB core ports in Host mode. The application - * must read the Host Port Control and Status (HPRT) register to - * determine the exact event that caused this interrupt. The - * application must clear the appropriate status bit in the Host - * Port Control and Status register to clear this bit. 
- * - * Call the user's port callback - */ - octeon_usb_port_callback(usb); - /* Clear the port change bits */ - usbc_hprt.u32 = - cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); - usbc_hprt.s.prtena = 0; - cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), - usbc_hprt.u32); - } - if (usbc_gintsts.s.hchint) { - /* - * Host Channels Interrupt (HChInt) - * The core sets this bit to indicate that an interrupt is - * pending on one of the channels of the core (in Host mode). - * The application must read the Host All Channels Interrupt - * (HAINT) register to determine the exact number of the channel - * on which the interrupt occurred, and then read the - * corresponding Host Channel-n Interrupt (HCINTn) register to - * determine the exact cause of the interrupt. The application - * must clear the appropriate status bit in the HCINTn register - * to clear this bit. - */ - union cvmx_usbcx_haint usbc_haint; - - usbc_haint.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HAINT(usb->index)); - while (usbc_haint.u32) { - int channel; - - channel = __fls(usbc_haint.u32); - cvmx_usb_poll_channel(usb, channel); - usbc_haint.u32 ^= 1 << channel; - } - } - - cvmx_usb_schedule(usb, usbc_gintsts.s.sof); - - return 0; -} - -/* convert between an HCD pointer and the corresponding struct octeon_hcd */ -static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd) -{ - return (struct octeon_hcd *)(hcd->hcd_priv); -} - -static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_poll(usb); - spin_unlock_irqrestore(&usb->lock, flags); - return IRQ_HANDLED; -} - -static int octeon_usb_start(struct usb_hcd *hcd) -{ - hcd->state = HC_STATE_RUNNING; - return 0; -} - -static void octeon_usb_stop(struct usb_hcd *hcd) -{ - hcd->state = HC_STATE_HALT; -} - -static int octeon_usb_get_frame_number(struct usb_hcd *hcd) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - - return cvmx_usb_get_frame_number(usb); -} - -static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, - struct urb *urb, - gfp_t mem_flags) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct device *dev = hcd->self.controller; - struct cvmx_usb_transaction *transaction = NULL; - struct cvmx_usb_pipe *pipe; - unsigned long flags; - struct cvmx_usb_iso_packet *iso_packet; - struct usb_host_endpoint *ep = urb->ep; - int rc; - - urb->status = 0; - spin_lock_irqsave(&usb->lock, flags); - - rc = usb_hcd_link_urb_to_ep(hcd, urb); - if (rc) { - spin_unlock_irqrestore(&usb->lock, flags); - return rc; - } - - if (!ep->hcpriv) { - enum cvmx_usb_transfer transfer_type; - enum cvmx_usb_speed speed; - int split_device = 0; - int split_port = 0; - - switch (usb_pipetype(urb->pipe)) { - case PIPE_ISOCHRONOUS: - transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS; - break; - case PIPE_INTERRUPT: - transfer_type = CVMX_USB_TRANSFER_INTERRUPT; - break; - case PIPE_CONTROL: - transfer_type = CVMX_USB_TRANSFER_CONTROL; - break; - default: - transfer_type = CVMX_USB_TRANSFER_BULK; - break; - } - switch (urb->dev->speed) { - case USB_SPEED_LOW: - speed = CVMX_USB_SPEED_LOW; - break; - case USB_SPEED_FULL: - speed = CVMX_USB_SPEED_FULL; - break; - default: - speed = CVMX_USB_SPEED_HIGH; - break; - } - /* - * For slow devices on high speed ports we need to find the hub - * that does the speed translation so we know where to send the - * split transactions. 
- */ - if (speed != CVMX_USB_SPEED_HIGH) { - /* - * Start at this device and work our way up the usb - * tree. - */ - struct usb_device *dev = urb->dev; - - while (dev->parent) { - /* - * If our parent is high speed then he'll - * receive the splits. - */ - if (dev->parent->speed == USB_SPEED_HIGH) { - split_device = dev->parent->devnum; - split_port = dev->portnum; - break; - } - /* - * Move up the tree one level. If we make it all - * the way up the tree, then the port must not - * be in high speed mode and we don't need a - * split. - */ - dev = dev->parent; - } - } - pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe), speed, - le16_to_cpu(ep->desc.wMaxPacketSize) - & 0x7ff, - transfer_type, - usb_pipein(urb->pipe) ? - CVMX_USB_DIRECTION_IN : - CVMX_USB_DIRECTION_OUT, - urb->interval, - (le16_to_cpu(ep->desc.wMaxPacketSize) - >> 11) & 0x3, - split_device, split_port); - if (!pipe) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&usb->lock, flags); - dev_dbg(dev, "Failed to create pipe\n"); - return -ENOMEM; - } - ep->hcpriv = pipe; - } else { - pipe = ep->hcpriv; - } - - switch (usb_pipetype(urb->pipe)) { - case PIPE_ISOCHRONOUS: - dev_dbg(dev, "Submit isochronous to %d.%d\n", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - /* - * Allocate a structure to use for our private list of - * isochronous packets. - */ - iso_packet = kmalloc_array(urb->number_of_packets, - sizeof(struct cvmx_usb_iso_packet), - GFP_ATOMIC); - if (iso_packet) { - int i; - /* Fill the list with the data from the URB */ - for (i = 0; i < urb->number_of_packets; i++) { - iso_packet[i].offset = - urb->iso_frame_desc[i].offset; - iso_packet[i].length = - urb->iso_frame_desc[i].length; - iso_packet[i].status = CVMX_USB_STATUS_ERROR; - } - /* - * Store a pointer to the list in the URB setup_packet - * field. We know this currently isn't being used and - * this saves us a bunch of logic. - */ - urb->setup_packet = (char *)iso_packet; - transaction = cvmx_usb_submit_isochronous(usb, - pipe, urb); - /* - * If submit failed we need to free our private packet - * list. 
- */ - if (!transaction) { - urb->setup_packet = NULL; - kfree(iso_packet); - } - } - break; - case PIPE_INTERRUPT: - dev_dbg(dev, "Submit interrupt to %d.%d\n", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_interrupt(usb, pipe, urb); - break; - case PIPE_CONTROL: - dev_dbg(dev, "Submit control to %d.%d\n", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_control(usb, pipe, urb); - break; - case PIPE_BULK: - dev_dbg(dev, "Submit bulk to %d.%d\n", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_bulk(usb, pipe, urb); - break; - } - if (!transaction) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&usb->lock, flags); - dev_dbg(dev, "Failed to submit\n"); - return -ENOMEM; - } - urb->hcpriv = transaction; - spin_unlock_irqrestore(&usb->lock, flags); - return 0; -} - -static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, - struct urb *urb, - int status) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - int rc; - - if (!urb->dev) - return -EINVAL; - - spin_lock_irqsave(&usb->lock, flags); - - rc = usb_hcd_check_unlink_urb(hcd, urb, status); - if (rc) - goto out; - - urb->status = status; - cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv); - -out: - spin_unlock_irqrestore(&usb->lock, flags); - - return rc; -} - -static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) -{ - struct device *dev = hcd->self.controller; - - if (ep->hcpriv) { - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct cvmx_usb_pipe *pipe = ep->hcpriv; - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_cancel_all(usb, pipe); - if (cvmx_usb_close_pipe(usb, pipe)) - dev_dbg(dev, "Closing pipe %p failed\n", pipe); - spin_unlock_irqrestore(&usb->lock, flags); - ep->hcpriv = NULL; - } -} - -static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct cvmx_usb_port_status port_status; - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - buf[0] = port_status.connect_change << 1; - - return buf[0] != 0; -} - -static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, - u16 wIndex, char *buf, u16 wLength) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct device *dev = hcd->self.controller; - struct cvmx_usb_port_status usb_port_status; - int port_status; - struct usb_hub_descriptor *desc; - unsigned long flags; - - switch (typeReq) { - case ClearHubFeature: - dev_dbg(dev, "ClearHubFeature\n"); - switch (wValue) { - case C_HUB_LOCAL_POWER: - case C_HUB_OVER_CURRENT: - /* Nothing required here */ - break; - default: - return -EINVAL; - } - break; - case ClearPortFeature: - dev_dbg(dev, "ClearPortFeature\n"); - if (wIndex != 1) { - dev_dbg(dev, " INVALID\n"); - return -EINVAL; - } - - switch (wValue) { - case USB_PORT_FEAT_ENABLE: - dev_dbg(dev, " ENABLE\n"); - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_disable(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case USB_PORT_FEAT_SUSPEND: - dev_dbg(dev, " SUSPEND\n"); - /* Not supported on Octeon */ - break; - case USB_PORT_FEAT_POWER: - dev_dbg(dev, " POWER\n"); - /* Not supported on Octeon */ - break; - case USB_PORT_FEAT_INDICATOR: - dev_dbg(dev, " INDICATOR\n"); - /* Port inidicator not supported */ - break; - case 
USB_PORT_FEAT_C_CONNECTION: - dev_dbg(dev, " C_CONNECTION\n"); - /* Clears drivers internal connect status change flag */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case USB_PORT_FEAT_C_RESET: - dev_dbg(dev, " C_RESET\n"); - /* - * Clears the driver's internal Port Reset Change flag. - */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case USB_PORT_FEAT_C_ENABLE: - dev_dbg(dev, " C_ENABLE\n"); - /* - * Clears the driver's internal Port Enable/Disable - * Change flag. - */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case USB_PORT_FEAT_C_SUSPEND: - dev_dbg(dev, " C_SUSPEND\n"); - /* - * Clears the driver's internal Port Suspend Change - * flag, which is set when resume signaling on the host - * port is complete. - */ - break; - case USB_PORT_FEAT_C_OVER_CURRENT: - dev_dbg(dev, " C_OVER_CURRENT\n"); - /* Clears the driver's overcurrent Change flag */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - default: - dev_dbg(dev, " UNKNOWN\n"); - return -EINVAL; - } - break; - case GetHubDescriptor: - dev_dbg(dev, "GetHubDescriptor\n"); - desc = (struct usb_hub_descriptor *)buf; - desc->bDescLength = 9; - desc->bDescriptorType = 0x29; - desc->bNbrPorts = 1; - desc->wHubCharacteristics = cpu_to_le16(0x08); - desc->bPwrOn2PwrGood = 1; - desc->bHubContrCurrent = 0; - desc->u.hs.DeviceRemovable[0] = 0; - desc->u.hs.DeviceRemovable[1] = 0xff; - break; - case GetHubStatus: - dev_dbg(dev, "GetHubStatus\n"); - *(__le32 *)buf = 0; - break; - case GetPortStatus: - dev_dbg(dev, "GetPortStatus\n"); - if (wIndex != 1) { - dev_dbg(dev, " INVALID\n"); - return -EINVAL; - } - - spin_lock_irqsave(&usb->lock, flags); - usb_port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - port_status = 0; - - if (usb_port_status.connect_change) { - port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); - dev_dbg(dev, " C_CONNECTION\n"); - } - - if (usb_port_status.port_enabled) { - port_status |= (1 << USB_PORT_FEAT_C_ENABLE); - dev_dbg(dev, " C_ENABLE\n"); - } - - if (usb_port_status.connected) { - port_status |= (1 << USB_PORT_FEAT_CONNECTION); - dev_dbg(dev, " CONNECTION\n"); - } - - if (usb_port_status.port_enabled) { - port_status |= (1 << USB_PORT_FEAT_ENABLE); - dev_dbg(dev, " ENABLE\n"); - } - - if (usb_port_status.port_over_current) { - port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); - dev_dbg(dev, " OVER_CURRENT\n"); - } - - if (usb_port_status.port_powered) { - port_status |= (1 << USB_PORT_FEAT_POWER); - dev_dbg(dev, " POWER\n"); - } - - if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) { - port_status |= USB_PORT_STAT_HIGH_SPEED; - dev_dbg(dev, " HIGHSPEED\n"); - } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) { - port_status |= (1 << USB_PORT_FEAT_LOWSPEED); - dev_dbg(dev, " LOWSPEED\n"); - } - - *((__le32 *)buf) = cpu_to_le32(port_status); - break; - case SetHubFeature: - dev_dbg(dev, "SetHubFeature\n"); - /* No HUB features supported */ - break; - case SetPortFeature: - dev_dbg(dev, "SetPortFeature\n"); - if (wIndex != 1) { - dev_dbg(dev, " INVALID\n"); - return -EINVAL; - } - - switch (wValue) { - case USB_PORT_FEAT_SUSPEND: - dev_dbg(dev, " SUSPEND\n"); - return -EINVAL; - case 
USB_PORT_FEAT_POWER: - dev_dbg(dev, " POWER\n"); - /* - * Program the port power bit to drive VBUS on the USB. - */ - spin_lock_irqsave(&usb->lock, flags); - USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), - cvmx_usbcx_hprt, prtpwr, 1); - spin_unlock_irqrestore(&usb->lock, flags); - return 0; - case USB_PORT_FEAT_RESET: - dev_dbg(dev, " RESET\n"); - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_reset_port(usb); - spin_unlock_irqrestore(&usb->lock, flags); - return 0; - case USB_PORT_FEAT_INDICATOR: - dev_dbg(dev, " INDICATOR\n"); - /* Not supported */ - break; - default: - dev_dbg(dev, " UNKNOWN\n"); - return -EINVAL; - } - break; - default: - dev_dbg(dev, "Unknown root hub request\n"); - return -EINVAL; - } - return 0; -} - -static const struct hc_driver octeon_hc_driver = { - .description = "Octeon USB", - .product_desc = "Octeon Host Controller", - .hcd_priv_size = sizeof(struct octeon_hcd), - .irq = octeon_usb_irq, - .flags = HCD_MEMORY | HCD_DMA | HCD_USB2, - .start = octeon_usb_start, - .stop = octeon_usb_stop, - .urb_enqueue = octeon_usb_urb_enqueue, - .urb_dequeue = octeon_usb_urb_dequeue, - .endpoint_disable = octeon_usb_endpoint_disable, - .get_frame_number = octeon_usb_get_frame_number, - .hub_status_data = octeon_usb_hub_status_data, - .hub_control = octeon_usb_hub_control, - .map_urb_for_dma = octeon_map_urb_for_dma, - .unmap_urb_for_dma = octeon_unmap_urb_for_dma, -}; - -static int octeon_usb_probe(struct platform_device *pdev) -{ - int status; - int initialize_flags; - int usb_num; - struct resource *res_mem; - struct device_node *usbn_node; - int irq = platform_get_irq(pdev, 0); - struct device *dev = &pdev->dev; - struct octeon_hcd *usb; - struct usb_hcd *hcd; - u32 clock_rate = 48000000; - bool is_crystal_clock = false; - const char *clock_type; - int i; - - if (!dev->of_node) { - dev_err(dev, "Error: empty of_node\n"); - return -ENXIO; - } - usbn_node = dev->of_node->parent; - - i = of_property_read_u32(usbn_node, - "clock-frequency", &clock_rate); - if (i) - i = of_property_read_u32(usbn_node, - "refclk-frequency", &clock_rate); - if (i) { - dev_err(dev, "No USBN \"clock-frequency\"\n"); - return -ENXIO; - } - switch (clock_rate) { - case 12000000: - initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; - break; - case 24000000: - initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; - break; - case 48000000: - initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; - break; - default: - dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n", - clock_rate); - return -ENXIO; - } - - i = of_property_read_string(usbn_node, - "cavium,refclk-type", &clock_type); - if (i) - i = of_property_read_string(usbn_node, - "refclk-type", &clock_type); - - if (!i && strcmp("crystal", clock_type) == 0) - is_crystal_clock = true; - - if (is_crystal_clock) - initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; - else - initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; - - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res_mem) { - dev_err(dev, "found no memory resource\n"); - return -ENXIO; - } - usb_num = (res_mem->start >> 44) & 1; - - if (irq < 0) { - /* Defective device tree, but we know how to fix it. */ - irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; - - irq = irq_create_mapping(NULL, hwirq); - } - - /* - * Set the DMA mask to 64bits so we get buffers already translated for - * DMA. 
- */ - i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (i) - return i; - - /* - * Only cn52XX and cn56XX have DWC_OTG USB hardware and the - * IOB priority registers. Under heavy network load USB - * hardware can be starved by the IOB causing a crash. Give - * it a priority boost if it has been waiting more than 400 - * cycles to avoid this situation. - * - * Testing indicates that a cnt_val of 8192 is not sufficient, - * but no failures are seen with 4096. We choose a value of - * 400 to give a safety factor of 10. - */ - if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { - union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; - - pri_cnt.u64 = 0; - pri_cnt.s.cnt_enb = 1; - pri_cnt.s.cnt_val = 400; - cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); - } - - hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); - if (!hcd) { - dev_dbg(dev, "Failed to allocate memory for HCD\n"); - return -1; - } - hcd->uses_new_polling = 1; - usb = (struct octeon_hcd *)hcd->hcd_priv; - - spin_lock_init(&usb->lock); - - usb->init_flags = initialize_flags; - - /* Initialize the USB state structure */ - usb->index = usb_num; - INIT_LIST_HEAD(&usb->idle_pipes); - for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++) - INIT_LIST_HEAD(&usb->active_pipes[i]); - - /* Due to an errata, CN31XX doesn't support DMA */ - if (OCTEON_IS_MODEL(OCTEON_CN31XX)) { - usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA; - /* Only use one channel with non DMA */ - usb->idle_hardware_channels = 0x1; - } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { - /* CN5XXX have an errata with channel 3 */ - usb->idle_hardware_channels = 0xf7; - } else { - usb->idle_hardware_channels = 0xff; - } - - status = cvmx_usb_initialize(dev, usb); - if (status) { - dev_dbg(dev, "USB initialization failed with %d\n", status); - usb_put_hcd(hcd); - return -1; - } - - status = usb_add_hcd(hcd, irq, 0); - if (status) { - dev_dbg(dev, "USB add HCD failed with %d\n", status); - usb_put_hcd(hcd); - return -1; - } - device_wakeup_enable(hcd->self.controller); - - dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); - - return 0; -} - -static int octeon_usb_remove(struct platform_device *pdev) -{ - int status; - struct device *dev = &pdev->dev; - struct usb_hcd *hcd = dev_get_drvdata(dev); - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - - usb_remove_hcd(hcd); - spin_lock_irqsave(&usb->lock, flags); - status = cvmx_usb_shutdown(usb); - spin_unlock_irqrestore(&usb->lock, flags); - if (status) - dev_dbg(dev, "USB shutdown failed with %d\n", status); - - usb_put_hcd(hcd); - - return 0; -} - -static const struct of_device_id octeon_usb_match[] = { - { - .compatible = "cavium,octeon-5750-usbc", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, octeon_usb_match); - -static struct platform_driver octeon_usb_driver = { - .driver = { - .name = "octeon-hcd", - .of_match_table = octeon_usb_match, - }, - .probe = octeon_usb_probe, - .remove = octeon_usb_remove, -}; - -static int __init octeon_usb_driver_init(void) -{ - if (usb_disabled()) - return 0; - - return platform_driver_register(&octeon_usb_driver); -} -module_init(octeon_usb_driver_init); - -static void __exit octeon_usb_driver_exit(void) -{ - if (usb_disabled()) - return; - - platform_driver_unregister(&octeon_usb_driver); -} -module_exit(octeon_usb_driver_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); -MODULE_DESCRIPTION("Cavium Inc. 
OCTEON USB Host driver."); diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h deleted file mode 100644 index 9ed619c93a4e..000000000000 --- a/drivers/staging/octeon-usb/octeon-hcd.h +++ /dev/null @@ -1,1847 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Octeon HCD hardware register definitions. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Some parts of the code were originally released under BSD license: - * - * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights - * reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Networks nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its associated - * regulations, and may be subject to export or import regulations in other - * countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR - * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO - * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION - * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM - * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, - * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF - * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR - * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR - * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
- */ - -#ifndef __OCTEON_HCD_H__ -#define __OCTEON_HCD_H__ - -#include <asm/bitfield.h> - -#define CVMX_USBCXBASE 0x00016F0010000000ull -#define CVMX_USBCXREG1(reg, bid) \ - (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \ - ((bid) & 1) * 0x100000000000ull) -#define CVMX_USBCXREG2(reg, bid, off) \ - (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \ - (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32) - -#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid) -#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid) -#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid) -#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid) -#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid) -#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid) -#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid) -#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid) -#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid) -#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid) -#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid) -#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid) -#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid) -#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off) -#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid) -#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off) -#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off) -#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off) -#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off) -#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid) -#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid) -#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid) -#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid) -#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid) - -#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull) -#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull) - -#define CVMX_USBNXREG1(reg, bid) \ - (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid)) -#define CVMX_USBNXREG2(reg, bid) \ - (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid)) - -#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid) -#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid) -#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid) -#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid) - -/** - * cvmx_usbc#_gahbcfg - * - * Core AHB Configuration Register (GAHBCFG) - * - * This register can be used to configure the core after power-on or a change in - * mode of operation. This register mainly contains AHB system-related - * configuration parameters. The AHB is the processor interface to the O2P USB - * core. In general, software need not know about this interface except to - * program the values as specified. - * - * The application must program this register as part of the O2P USB core - * initialization. Do not change this register after the initial programming. - */ -union cvmx_usbcx_gahbcfg { - u32 u32; - /** - * struct cvmx_usbcx_gahbcfg_s - * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl) - * Software should set this bit to 0x1. - * Indicates when the Periodic TxFIFO Empty Interrupt bit in the - * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This - * bit is used only in Slave mode. 
- * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic - * TxFIFO is half empty - * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic - * TxFIFO is completely empty - * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl) - * Software should set this bit to 0x1. - * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in - * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered. - * This bit is used only in Slave mode. - * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non- - * Periodic TxFIFO is half empty - * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non- - * Periodic TxFIFO is completely empty - * @dmaen: DMA Enable (DMAEn) - * * 1'b0: Core operates in Slave mode - * * 1'b1: Core operates in a DMA mode - * @hbstlen: Burst Length/Type (HBstLen) - * This field has not effect and should be left as 0x0. - * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk) - * Software should set this field to 0x1. - * The application uses this bit to mask or unmask the interrupt - * line assertion to itself. Irrespective of this bit's setting, - * the interrupt status registers are updated by the core. - * * 1'b0: Mask the interrupt assertion to the application. - * * 1'b1: Unmask the interrupt assertion to the application. - */ - struct cvmx_usbcx_gahbcfg_s { - __BITFIELD_FIELD(u32 reserved_9_31 : 23, - __BITFIELD_FIELD(u32 ptxfemplvl : 1, - __BITFIELD_FIELD(u32 nptxfemplvl : 1, - __BITFIELD_FIELD(u32 reserved_6_6 : 1, - __BITFIELD_FIELD(u32 dmaen : 1, - __BITFIELD_FIELD(u32 hbstlen : 4, - __BITFIELD_FIELD(u32 glblintrmsk : 1, - ;))))))) - } s; -}; - -/** - * cvmx_usbc#_ghwcfg3 - * - * User HW Config3 Register (GHWCFG3) - * - * This register contains the configuration options of the O2P USB core. - */ -union cvmx_usbcx_ghwcfg3 { - u32 u32; - /** - * struct cvmx_usbcx_ghwcfg3_s - * @dfifodepth: DFIFO Depth (DfifoDepth) - * This value is in terms of 32-bit words. - * * Minimum value is 32 - * * Maximum value is 32768 - * @ahbphysync: AHB and PHY Synchronous (AhbPhySync) - * Indicates whether AHB and PHY clocks are synchronous to - * each other. - * * 1'b0: No - * * 1'b1: Yes - * This bit is tied to 1. - * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType) - * * 1'b0: Asynchronous reset is used in the core - * * 1'b1: Synchronous reset is used in the core - * @optfeature: Optional Features Removed (OptFeature) - * Indicates whether the User ID register, GPIO interface ports, - * and SOF toggle and counter ports were removed for gate count - * optimization. - * @vendor_control_interface_support: Vendor Control Interface Support - * * 1'b0: Vendor Control Interface is not available on the core. - * * 1'b1: Vendor Control Interface is available. - * @i2c_selection: I2C Selection - * * 1'b0: I2C Interface is not available on the core. - * * 1'b1: I2C Interface is available on the core. - * @otgen: OTG Function Enabled (OtgEn) - * The application uses this bit to indicate the O2P USB core's - * OTG capabilities. - * * 1'b0: Not OTG capable - * * 1'b1: OTG Capable - * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth) - * * 3'b000: 4 bits - * * 3'b001: 5 bits - * * 3'b010: 6 bits - * * 3'b011: 7 bits - * * 3'b100: 8 bits - * * 3'b101: 9 bits - * * 3'b110: 10 bits - * * Others: Reserved - * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth) - * * 4'b0000: 11 bits - * * 4'b0001: 12 bits - * - ... 
- * * 4'b1000: 19 bits - * * Others: Reserved - */ - struct cvmx_usbcx_ghwcfg3_s { - __BITFIELD_FIELD(u32 dfifodepth : 16, - __BITFIELD_FIELD(u32 reserved_13_15 : 3, - __BITFIELD_FIELD(u32 ahbphysync : 1, - __BITFIELD_FIELD(u32 rsttype : 1, - __BITFIELD_FIELD(u32 optfeature : 1, - __BITFIELD_FIELD(u32 vendor_control_interface_support : 1, - __BITFIELD_FIELD(u32 i2c_selection : 1, - __BITFIELD_FIELD(u32 otgen : 1, - __BITFIELD_FIELD(u32 pktsizewidth : 3, - __BITFIELD_FIELD(u32 xfersizewidth : 4, - ;)))))))))) - } s; -}; - -/** - * cvmx_usbc#_gintmsk - * - * Core Interrupt Mask Register (GINTMSK) - * - * This register works with the Core Interrupt register to interrupt the - * application. When an interrupt bit is masked, the interrupt associated with - * that bit will not be generated. However, the Core Interrupt (GINTSTS) - * register bit corresponding to that interrupt will still be set. - * Mask interrupt: 1'b0, Unmask interrupt: 1'b1 - */ -union cvmx_usbcx_gintmsk { - u32 u32; - /** - * struct cvmx_usbcx_gintmsk_s - * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask - * (WkUpIntMsk) - * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask - * (SessReqIntMsk) - * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk) - * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk) - * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk) - * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk) - * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk) - * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk) - * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk) - * Incomplete Isochronous OUT Transfer Mask - * (incompISOOUTMsk) - * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask - * (incompISOINMsk) - * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk) - * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk) - * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk) - * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk) - * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask - * (ISOOutDropMsk) - * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk) - * @usbrstmsk: USB Reset Mask (USBRstMsk) - * @usbsuspmsk: USB Suspend Mask (USBSuspMsk) - * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk) - * @i2cint: I2C Interrupt Mask (I2CINT) - * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk) - * I2C Carkit Interrupt Mask (I2CCKINTMsk) - * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk) - * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask - * (GINNakEffMsk) - * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk) - * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk) - * @sofmsk: Start of (micro)Frame Mask (SofMsk) - * @otgintmsk: OTG Interrupt Mask (OTGIntMsk) - * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk) - */ - struct cvmx_usbcx_gintmsk_s { - __BITFIELD_FIELD(u32 wkupintmsk : 1, - __BITFIELD_FIELD(u32 sessreqintmsk : 1, - __BITFIELD_FIELD(u32 disconnintmsk : 1, - __BITFIELD_FIELD(u32 conidstschngmsk : 1, - __BITFIELD_FIELD(u32 reserved_27_27 : 1, - __BITFIELD_FIELD(u32 ptxfempmsk : 1, - __BITFIELD_FIELD(u32 hchintmsk : 1, - __BITFIELD_FIELD(u32 prtintmsk : 1, - __BITFIELD_FIELD(u32 reserved_23_23 : 1, - __BITFIELD_FIELD(u32 fetsuspmsk : 1, - __BITFIELD_FIELD(u32 incomplpmsk : 1, - __BITFIELD_FIELD(u32 incompisoinmsk : 1, - __BITFIELD_FIELD(u32 oepintmsk : 1, - __BITFIELD_FIELD(u32 inepintmsk : 1, - __BITFIELD_FIELD(u32 epmismsk : 1, - __BITFIELD_FIELD(u32 reserved_16_16 : 1, - 
__BITFIELD_FIELD(u32 eopfmsk : 1, - __BITFIELD_FIELD(u32 isooutdropmsk : 1, - __BITFIELD_FIELD(u32 enumdonemsk : 1, - __BITFIELD_FIELD(u32 usbrstmsk : 1, - __BITFIELD_FIELD(u32 usbsuspmsk : 1, - __BITFIELD_FIELD(u32 erlysuspmsk : 1, - __BITFIELD_FIELD(u32 i2cint : 1, - __BITFIELD_FIELD(u32 ulpickintmsk : 1, - __BITFIELD_FIELD(u32 goutnakeffmsk : 1, - __BITFIELD_FIELD(u32 ginnakeffmsk : 1, - __BITFIELD_FIELD(u32 nptxfempmsk : 1, - __BITFIELD_FIELD(u32 rxflvlmsk : 1, - __BITFIELD_FIELD(u32 sofmsk : 1, - __BITFIELD_FIELD(u32 otgintmsk : 1, - __BITFIELD_FIELD(u32 modemismsk : 1, - __BITFIELD_FIELD(u32 reserved_0_0 : 1, - ;)))))))))))))))))))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_gintsts - * - * Core Interrupt Register (GINTSTS) - * - * This register interrupts the application for system-level events in the - * current mode of operation (Device mode or Host mode). It is shown in - * Interrupt. Some of the bits in this register are valid only in Host mode, - * while others are valid in Device mode only. This register also indicates the - * current mode of operation. In order to clear the interrupt status bits of - * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status - * interrupts are read only; once software reads from or writes to the FIFO - * while servicing these interrupts, FIFO interrupt conditions are cleared - * automatically. - */ -union cvmx_usbcx_gintsts { - u32 u32; - /** - * struct cvmx_usbcx_gintsts_s - * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt) - * In Device mode, this interrupt is asserted when a resume is - * detected on the USB. In Host mode, this interrupt is asserted - * when a remote wakeup is detected on the USB. - * For more information on how to use this interrupt, see "Partial - * Power-Down and Clock Gating Programming Model" on - * page 353. - * @sessreqint: Session Request/New Session Detected Interrupt - * (SessReqInt) - * In Host mode, this interrupt is asserted when a session request - * is detected from the device. In Device mode, this interrupt is - * asserted when the utmiotg_bvalid signal goes high. - * For more information on how to use this interrupt, see "Partial - * Power-Down and Clock Gating Programming Model" on - * page 353. - * @disconnint: Disconnect Detected Interrupt (DisconnInt) - * Asserted when a device disconnect is detected. - * @conidstschng: Connector ID Status Change (ConIDStsChng) - * The core sets this bit when there is a change in connector ID - * status. - * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp) - * Asserted when the Periodic Transmit FIFO is either half or - * completely empty and there is space for at least one entry to be - * written in the Periodic Request Queue. The half or completely - * empty status is determined by the Periodic TxFIFO Empty Level - * bit in the Core AHB Configuration register - * (GAHBCFG.PTxFEmpLvl). - * @hchint: Host Channels Interrupt (HChInt) - * The core sets this bit to indicate that an interrupt is pending - * on one of the channels of the core (in Host mode). The - * application must read the Host All Channels Interrupt (HAINT) - * register to determine the exact number of the channel on which - * the interrupt occurred, and then read the corresponding Host - * Channel-n Interrupt (HCINTn) register to determine the exact - * cause of the interrupt. The application must clear the - * appropriate status bit in the HCINTn register to clear this bit. 
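The @hchint description above spells out the dispatch flow the removed host driver relied on: when GINTSTS.HChInt fires, HAINT identifies the interrupting channel, the matching HCINTn identifies the cause, and clearing the bit there propagates back up to HAINT and GINTSTS. A minimal sketch of that flow, assuming the cvmx_usbcx_haint and cvmx_usbcx_hcintx unions defined later in this header and hypothetical usbc_read32()/usbc_write32() MMIO helpers (the original driver used its own accessors):

u32 usbc_read32(u64 address);              /* hypothetical MMIO read helper  */
void usbc_write32(u64 address, u32 value); /* hypothetical MMIO write helper */

static void service_host_channels(int bid)
{
	union cvmx_usbcx_haint haint;
	union cvmx_usbcx_hcintx hcint;
	int ch;

	/* One pending bit per channel: bit 0 is channel 0, bit 15 channel 15. */
	haint.u32 = usbc_read32(CVMX_USBCX_HAINT(bid));

	for (ch = 0; ch < 16; ch++) {
		if (!(haint.s.haint & (1u << ch)))
			continue;

		/* HCINTn tells us why this particular channel interrupted. */
		hcint.u32 = usbc_read32(CVMX_USBCX_HCINTX(ch, bid));

		/* ... handle xfercompl, chhltd, error bits for this channel ... */

		/*
		 * Acknowledging the handled bits in HCINTn also clears the
		 * corresponding HAINT bit and, once no channel is pending,
		 * GINTSTS.HChInt, per the description above.
		 */
		usbc_write32(CVMX_USBCX_HCINTX(ch, bid), hcint.u32);
	}
}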
- * @prtint: Host Port Interrupt (PrtInt) - * The core sets this bit to indicate a change in port status of - * one of the O2P USB core ports in Host mode. The application must - * read the Host Port Control and Status (HPRT) register to - * determine the exact event that caused this interrupt. The - * application must clear the appropriate status bit in the Host - * Port Control and Status register to clear this bit. - * @fetsusp: Data Fetch Suspended (FetSusp) - * This interrupt is valid only in DMA mode. This interrupt - * indicates that the core has stopped fetching data for IN - * endpoints due to the unavailability of TxFIFO space or Request - * Queue space. This interrupt is used by the application for an - * endpoint mismatch algorithm. - * @incomplp: Incomplete Periodic Transfer (incomplP) - * In Host mode, the core sets this interrupt bit when there are - * incomplete periodic transactions still pending which are - * scheduled for the current microframe. - * Incomplete Isochronous OUT Transfer (incompISOOUT) - * The Device mode, the core sets this interrupt to indicate that - * there is at least one isochronous OUT endpoint on which the - * transfer is not completed in the current microframe. This - * interrupt is asserted along with the End of Periodic Frame - * Interrupt (EOPF) bit in this register. - * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN) - * The core sets this interrupt to indicate that there is at least - * one isochronous IN endpoint on which the transfer is not - * completed in the current microframe. This interrupt is asserted - * along with the End of Periodic Frame Interrupt (EOPF) bit in - * this register. - * @oepint: OUT Endpoints Interrupt (OEPInt) - * The core sets this bit to indicate that an interrupt is pending - * on one of the OUT endpoints of the core (in Device mode). The - * application must read the Device All Endpoints Interrupt - * (DAINT) register to determine the exact number of the OUT - * endpoint on which the interrupt occurred, and then read the - * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn) - * register to determine the exact cause of the interrupt. The - * application must clear the appropriate status bit in the - * corresponding DOEPINTn register to clear this bit. - * @iepint: IN Endpoints Interrupt (IEPInt) - * The core sets this bit to indicate that an interrupt is pending - * on one of the IN endpoints of the core (in Device mode). The - * application must read the Device All Endpoints Interrupt - * (DAINT) register to determine the exact number of the IN - * endpoint on which the interrupt occurred, and then read the - * corresponding Device IN Endpoint-n Interrupt (DIEPINTn) - * register to determine the exact cause of the interrupt. The - * application must clear the appropriate status bit in the - * corresponding DIEPINTn register to clear this bit. - * @epmis: Endpoint Mismatch Interrupt (EPMis) - * Indicates that an IN token has been received for a non-periodic - * endpoint, but the data for another endpoint is present in the - * top of the Non-Periodic Transmit FIFO and the IN endpoint - * mismatch count programmed by the application has expired. - * @eopf: End of Periodic Frame Interrupt (EOPF) - * Indicates that the period specified in the Periodic Frame - * Interval field of the Device Configuration register - * (DCFG.PerFrInt) has been reached in the current microframe. 
- * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop) - * The core sets this bit when it fails to write an isochronous OUT - * packet into the RxFIFO because the RxFIFO doesn't have - * enough space to accommodate a maximum packet size packet - * for the isochronous OUT endpoint. - * @enumdone: Enumeration Done (EnumDone) - * The core sets this bit to indicate that speed enumeration is - * complete. The application must read the Device Status (DSTS) - * register to obtain the enumerated speed. - * @usbrst: USB Reset (USBRst) - * The core sets this bit to indicate that a reset is detected on - * the USB. - * @usbsusp: USB Suspend (USBSusp) - * The core sets this bit to indicate that a suspend was detected - * on the USB. The core enters the Suspended state when there - * is no activity on the phy_line_state_i signal for an extended - * period of time. - * @erlysusp: Early Suspend (ErlySusp) - * The core sets this bit to indicate that an Idle state has been - * detected on the USB for 3 ms. - * @i2cint: I2C Interrupt (I2CINT) - * This bit is always 0x0. - * @ulpickint: ULPI Carkit Interrupt (ULPICKINT) - * This bit is always 0x0. - * @goutnakeff: Global OUT NAK Effective (GOUTNakEff) - * Indicates that the Set Global OUT NAK bit in the Device Control - * register (DCTL.SGOUTNak), set by the application, has taken - * effect in the core. This bit can be cleared by writing the Clear - * Global OUT NAK bit in the Device Control register - * (DCTL.CGOUTNak). - * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff) - * Indicates that the Set Global Non-Periodic IN NAK bit in the - * Device Control register (DCTL.SGNPInNak), set by the - * application, has taken effect in the core. That is, the core has - * sampled the Global IN NAK bit set by the application. This bit - * can be cleared by clearing the Clear Global Non-Periodic IN - * NAK bit in the Device Control register (DCTL.CGNPInNak). - * This interrupt does not necessarily mean that a NAK handshake - * is sent out on the USB. The STALL bit takes precedence over - * the NAK bit. - * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp) - * This interrupt is asserted when the Non-Periodic TxFIFO is - * either half or completely empty, and there is space for at least - * one entry to be written to the Non-Periodic Transmit Request - * Queue. The half or completely empty status is determined by - * the Non-Periodic TxFIFO Empty Level bit in the Core AHB - * Configuration register (GAHBCFG.NPTxFEmpLvl). - * @rxflvl: RxFIFO Non-Empty (RxFLvl) - * Indicates that there is at least one packet pending to be read - * from the RxFIFO. - * @sof: Start of (micro)Frame (Sof) - * In Host mode, the core sets this bit to indicate that an SOF - * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the - * USB. The application must write a 1 to this bit to clear the - * interrupt. - * In Device mode, in the core sets this bit to indicate that an - * SOF token has been received on the USB. The application can read - * the Device Status register to get the current (micro)frame - * number. This interrupt is seen only when the core is operating - * at either HS or FS. - * @otgint: OTG Interrupt (OTGInt) - * The core sets this bit to indicate an OTG protocol event. The - * application must read the OTG Interrupt Status (GOTGINT) - * register to determine the exact event that caused this - * interrupt. The application must clear the appropriate status bit - * in the GOTGINT register to clear this bit. 
- * @modemis: Mode Mismatch Interrupt (ModeMis) - * The core sets this bit when the application is trying to access: - * * A Host mode register, when the core is operating in Device - * mode - * * A Device mode register, when the core is operating in Host - * mode - * The register access is completed on the AHB with an OKAY - * response, but is ignored by the core internally and doesn't - * affect the operation of the core. - * @curmod: Current Mode of Operation (CurMod) - * Indicates the current mode of operation. - * * 1'b0: Device mode - * * 1'b1: Host mode - */ - struct cvmx_usbcx_gintsts_s { - __BITFIELD_FIELD(u32 wkupint : 1, - __BITFIELD_FIELD(u32 sessreqint : 1, - __BITFIELD_FIELD(u32 disconnint : 1, - __BITFIELD_FIELD(u32 conidstschng : 1, - __BITFIELD_FIELD(u32 reserved_27_27 : 1, - __BITFIELD_FIELD(u32 ptxfemp : 1, - __BITFIELD_FIELD(u32 hchint : 1, - __BITFIELD_FIELD(u32 prtint : 1, - __BITFIELD_FIELD(u32 reserved_23_23 : 1, - __BITFIELD_FIELD(u32 fetsusp : 1, - __BITFIELD_FIELD(u32 incomplp : 1, - __BITFIELD_FIELD(u32 incompisoin : 1, - __BITFIELD_FIELD(u32 oepint : 1, - __BITFIELD_FIELD(u32 iepint : 1, - __BITFIELD_FIELD(u32 epmis : 1, - __BITFIELD_FIELD(u32 reserved_16_16 : 1, - __BITFIELD_FIELD(u32 eopf : 1, - __BITFIELD_FIELD(u32 isooutdrop : 1, - __BITFIELD_FIELD(u32 enumdone : 1, - __BITFIELD_FIELD(u32 usbrst : 1, - __BITFIELD_FIELD(u32 usbsusp : 1, - __BITFIELD_FIELD(u32 erlysusp : 1, - __BITFIELD_FIELD(u32 i2cint : 1, - __BITFIELD_FIELD(u32 ulpickint : 1, - __BITFIELD_FIELD(u32 goutnakeff : 1, - __BITFIELD_FIELD(u32 ginnakeff : 1, - __BITFIELD_FIELD(u32 nptxfemp : 1, - __BITFIELD_FIELD(u32 rxflvl : 1, - __BITFIELD_FIELD(u32 sof : 1, - __BITFIELD_FIELD(u32 otgint : 1, - __BITFIELD_FIELD(u32 modemis : 1, - __BITFIELD_FIELD(u32 curmod : 1, - ;)))))))))))))))))))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_gnptxfsiz - * - * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ) - * - * The application can program the RAM size and the memory start address for the - * Non-Periodic TxFIFO. - */ -union cvmx_usbcx_gnptxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_gnptxfsiz_s - * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep) - * This value is in terms of 32-bit words. - * Minimum value is 16 - * Maximum value is 32768 - * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr) - * This field contains the memory start address for Non-Periodic - * Transmit FIFO RAM. - */ - struct cvmx_usbcx_gnptxfsiz_s { - __BITFIELD_FIELD(u32 nptxfdep : 16, - __BITFIELD_FIELD(u32 nptxfstaddr : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_gnptxsts - * - * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS) - * - * This read-only register contains the free space information for the - * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue. - */ -union cvmx_usbcx_gnptxsts { - u32 u32; - /** - * struct cvmx_usbcx_gnptxsts_s - * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop) - * Entry in the Non-Periodic Tx Request Queue that is currently - * being processed by the MAC. - * * Bits [30:27]: Channel/endpoint number - * * Bits [26:25]: - * - 2'b00: IN/OUT token - * - 2'b01: Zero-length transmit packet (device IN/host OUT) - * - 2'b10: PING/CSPLIT token - * - 2'b11: Channel halt command - * * Bit [24]: Terminate (last entry for selected channel/endpoint) - * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available - * (NPTxQSpcAvail) - * Indicates the amount of free space available in the Non- - * Periodic Transmit Request Queue. 
This queue holds both IN - * and OUT requests in Host mode. Device mode has only IN - * requests. - * * 8'h0: Non-Periodic Transmit Request Queue is full - * * 8'h1: 1 location available - * * 8'h2: 2 locations available - * * n: n locations available (0..8) - * * Others: Reserved - * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail) - * Indicates the amount of free space available in the Non- - * Periodic TxFIFO. - * Values are in terms of 32-bit words. - * * 16'h0: Non-Periodic TxFIFO is full - * * 16'h1: 1 word available - * * 16'h2: 2 words available - * * 16'hn: n words available (where 0..32768) - * * 16'h8000: 32768 words available - * * Others: Reserved - */ - struct cvmx_usbcx_gnptxsts_s { - __BITFIELD_FIELD(u32 reserved_31_31 : 1, - __BITFIELD_FIELD(u32 nptxqtop : 7, - __BITFIELD_FIELD(u32 nptxqspcavail : 8, - __BITFIELD_FIELD(u32 nptxfspcavail : 16, - ;)))) - } s; -}; - -/** - * cvmx_usbc#_grstctl - * - * Core Reset Register (GRSTCTL) - * - * The application uses this register to reset various hardware features inside - * the core. - */ -union cvmx_usbcx_grstctl { - u32 u32; - /** - * struct cvmx_usbcx_grstctl_s - * @ahbidle: AHB Master Idle (AHBIdle) - * Indicates that the AHB Master State Machine is in the IDLE - * condition. - * @dmareq: DMA Request Signal (DMAReq) - * Indicates that the DMA request is in progress. Used for debug. - * @txfnum: TxFIFO Number (TxFNum) - * This is the FIFO number that must be flushed using the TxFIFO - * Flush bit. This field must not be changed until the core clears - * the TxFIFO Flush bit. - * * 5'h0: Non-Periodic TxFIFO flush - * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic - * TxFIFO flush in Host mode - * * 5'h2: Periodic TxFIFO 2 flush in Device mode - * - ... - * * 5'hF: Periodic TxFIFO 15 flush in Device mode - * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the - * core - * @txfflsh: TxFIFO Flush (TxFFlsh) - * This bit selectively flushes a single or all transmit FIFOs, but - * cannot do so if the core is in the midst of a transaction. - * The application must only write this bit after checking that the - * core is neither writing to the TxFIFO nor reading from the - * TxFIFO. - * The application must wait until the core clears this bit before - * performing any operations. This bit takes 8 clocks (of phy_clk - * or hclk, whichever is slower) to clear. - * @rxfflsh: RxFIFO Flush (RxFFlsh) - * The application can flush the entire RxFIFO using this bit, but - * must first ensure that the core is not in the middle of a - * transaction. - * The application must only write to this bit after checking that - * the core is neither reading from the RxFIFO nor writing to the - * RxFIFO. - * The application must wait until the bit is cleared before - * performing any other operations. This bit will take 8 clocks - * (slowest of PHY or AHB clock) to clear. - * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh) - * The application writes this bit to flush the IN Token Sequence - * Learning Queue. - * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst) - * The application writes this bit to reset the (micro)frame number - * counter inside the core. When the (micro)frame counter is reset, - * the subsequent SOF sent out by the core will have a - * (micro)frame number of 0. - * @hsftrst: HClk Soft Reset (HSftRst) - * The application uses this bit to flush the control logic in the - * AHB Clock domain. Only AHB Clock Domain pipelines are reset. - * * FIFOs are not flushed with this bit. 
- * * All state machines in the AHB clock domain are reset to the - * Idle state after terminating the transactions on the AHB, - * following the protocol. - * * CSR control bits used by the AHB clock domain state - * machines are cleared. - * * To clear this interrupt, status mask bits that control the - * interrupt status and are generated by the AHB clock domain - * state machine are cleared. - * * Because interrupt status bits are not cleared, the application - * can get the status of any core events that occurred after it set - * this bit. - * This is a self-clearing bit that the core clears after all - * necessary logic is reset in the core. This may take several - * clocks, depending on the core's current state. - * @csftrst: Core Soft Reset (CSftRst) - * Resets the hclk and phy_clock domains as follows: - * * Clears the interrupts and all the CSR registers except the - * following register bits: - * - PCGCCTL.RstPdwnModule - * - PCGCCTL.GateHclk - * - PCGCCTL.PwrClmp - * - PCGCCTL.StopPPhyLPwrClkSelclk - * - GUSBCFG.PhyLPwrClkSel - * - GUSBCFG.DDRSel - * - GUSBCFG.PHYSel - * - GUSBCFG.FSIntf - * - GUSBCFG.ULPI_UTMI_Sel - * - GUSBCFG.PHYIf - * - HCFG.FSLSPclkSel - * - DCFG.DevSpd - * * All module state machines (except the AHB Slave Unit) are - * reset to the IDLE state, and all the transmit FIFOs and the - * receive FIFO are flushed. - * * Any transactions on the AHB Master are terminated as soon - * as possible, after gracefully completing the last data phase of - * an AHB transfer. Any transactions on the USB are terminated - * immediately. - * The application can write to this bit any time it wants to reset - * the core. This is a self-clearing bit and the core clears this - * bit after all the necessary logic is reset in the core, which - * may take several clocks, depending on the current state of the - * core. Once this bit is cleared software should wait at least 3 - * PHY clocks before doing any access to the PHY domain - * (synchronization delay). Software should also should check that - * bit 31 of this register is 1 (AHB Master is IDLE) before - * starting any operation. - * Typically software reset is used during software development - * and also when you dynamically change the PHY selection bits - * in the USB configuration registers listed above. When you - * change the PHY, the corresponding clock for the PHY is - * selected and used in the PHY domain. Once a new clock is - * selected, the PHY domain has to be reset for proper operation. - */ - struct cvmx_usbcx_grstctl_s { - __BITFIELD_FIELD(u32 ahbidle : 1, - __BITFIELD_FIELD(u32 dmareq : 1, - __BITFIELD_FIELD(u32 reserved_11_29 : 19, - __BITFIELD_FIELD(u32 txfnum : 5, - __BITFIELD_FIELD(u32 txfflsh : 1, - __BITFIELD_FIELD(u32 rxfflsh : 1, - __BITFIELD_FIELD(u32 intknqflsh : 1, - __BITFIELD_FIELD(u32 frmcntrrst : 1, - __BITFIELD_FIELD(u32 hsftrst : 1, - __BITFIELD_FIELD(u32 csftrst : 1, - ;)))))))))) - } s; -}; - -/** - * cvmx_usbc#_grxfsiz - * - * Receive FIFO Size Register (GRXFSIZ) - * - * The application can program the RAM size that must be allocated to the - * RxFIFO. - */ -union cvmx_usbcx_grxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_grxfsiz_s - * @rxfdep: RxFIFO Depth (RxFDep) - * This value is in terms of 32-bit words. 
- * * Minimum value is 16 - * * Maximum value is 32768 - */ - struct cvmx_usbcx_grxfsiz_s { - __BITFIELD_FIELD(u32 reserved_16_31 : 16, - __BITFIELD_FIELD(u32 rxfdep : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_grxstsph - * - * Receive Status Read and Pop Register, Host Mode (GRXSTSPH) - * - * A read to the Receive Status Read and Pop register returns and additionally - * pops the top data entry out of the RxFIFO. - * This Description is only valid when the core is in Host Mode. For Device Mode - * use USBC_GRXSTSPD instead. - * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the - * same offset in the O2P USB core. The offset difference shown in this - * document is for software clarity and is actually ignored by the - * hardware. - */ -union cvmx_usbcx_grxstsph { - u32 u32; - /** - * struct cvmx_usbcx_grxstsph_s - * @pktsts: Packet Status (PktSts) - * Indicates the status of the received packet - * * 4'b0010: IN data packet received - * * 4'b0011: IN transfer completed (triggers an interrupt) - * * 4'b0101: Data toggle error (triggers an interrupt) - * * 4'b0111: Channel halted (triggers an interrupt) - * * Others: Reserved - * @dpid: Data PID (DPID) - * * 2'b00: DATA0 - * * 2'b10: DATA1 - * * 2'b01: DATA2 - * * 2'b11: MDATA - * @bcnt: Byte Count (BCnt) - * Indicates the byte count of the received IN data packet - * @chnum: Channel Number (ChNum) - * Indicates the channel number to which the current received - * packet belongs. - */ - struct cvmx_usbcx_grxstsph_s { - __BITFIELD_FIELD(u32 reserved_21_31 : 11, - __BITFIELD_FIELD(u32 pktsts : 4, - __BITFIELD_FIELD(u32 dpid : 2, - __BITFIELD_FIELD(u32 bcnt : 11, - __BITFIELD_FIELD(u32 chnum : 4, - ;))))) - } s; -}; - -/** - * cvmx_usbc#_gusbcfg - * - * Core USB Configuration Register (GUSBCFG) - * - * This register can be used to configure the core after power-on or a changing - * to Host mode or Device mode. It contains USB and USB-PHY related - * configuration parameters. The application must program this register before - * starting any transactions on either the AHB or the USB. Do not make changes - * to this register after the initial programming. - */ -union cvmx_usbcx_gusbcfg { - u32 u32; - /** - * struct cvmx_usbcx_gusbcfg_s - * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel) - * This bit is always 0x0. - * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel) - * Software should set this bit to 0x0. - * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In - * FS and LS modes, the PHY can usually operate on a 48-MHz - * clock to save power. - * * 1'b0: 480-MHz Internal PLL clock - * * 1'b1: 48-MHz External Clock - * In 480 MHz mode, the UTMI interface operates at either 60 or - * 30-MHz, depending upon whether 8- or 16-bit data width is - * selected. In 48-MHz mode, the UTMI interface operates at 48 - * MHz in FS mode and at either 48 or 6 MHz in LS mode - * (depending on the PHY vendor). - * This bit drives the utmi_fsls_low_power core output signal, and - * is valid only for UTMI+ PHYs. - * @usbtrdtim: USB Turnaround Time (USBTrdTim) - * Sets the turnaround time in PHY clocks. - * Specifies the response time for a MAC request to the Packet - * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM). - * This must be programmed to 0x5. - * @hnpcap: HNP-Capable (HNPCap) - * This bit is always 0x0. - * @srpcap: SRP-Capable (SRPCap) - * This bit is always 0x0. - * @ddrsel: ULPI DDR Select (DDRSel) - * Software should set this bit to 0x0. 
- * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial - * Software should set this bit to 0x0. - * @fsintf: Full-Speed Serial Interface Select (FSIntf) - * Software should set this bit to 0x0. - * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel) - * This bit is always 0x0. - * @phyif: PHY Interface (PHYIf) - * This bit is always 0x1. - * @toutcal: HS/FS Timeout Calibration (TOutCal) - * The number of PHY clocks that the application programs in this - * field is added to the high-speed/full-speed interpacket timeout - * duration in the core to account for any additional delays - * introduced by the PHY. This may be required, since the delay - * introduced by the PHY in generating the linestate condition may - * vary from one PHY to another. - * The USB standard timeout value for high-speed operation is - * 736 to 816 (inclusive) bit times. The USB standard timeout - * value for full-speed operation is 16 to 18 (inclusive) bit - * times. The application must program this field based on the - * speed of enumeration. The number of bit times added per PHY - * clock are: - * High-speed operation: - * * One 30-MHz PHY clock = 16 bit times - * * One 60-MHz PHY clock = 8 bit times - * Full-speed operation: - * * One 30-MHz PHY clock = 0.4 bit times - * * One 60-MHz PHY clock = 0.2 bit times - * * One 48-MHz PHY clock = 0.25 bit times - */ - struct cvmx_usbcx_gusbcfg_s { - __BITFIELD_FIELD(u32 reserved_17_31 : 15, - __BITFIELD_FIELD(u32 otgi2csel : 1, - __BITFIELD_FIELD(u32 phylpwrclksel : 1, - __BITFIELD_FIELD(u32 reserved_14_14 : 1, - __BITFIELD_FIELD(u32 usbtrdtim : 4, - __BITFIELD_FIELD(u32 hnpcap : 1, - __BITFIELD_FIELD(u32 srpcap : 1, - __BITFIELD_FIELD(u32 ddrsel : 1, - __BITFIELD_FIELD(u32 physel : 1, - __BITFIELD_FIELD(u32 fsintf : 1, - __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1, - __BITFIELD_FIELD(u32 phyif : 1, - __BITFIELD_FIELD(u32 toutcal : 3, - ;))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_haint - * - * Host All Channels Interrupt Register (HAINT) - * - * When a significant event occurs on a channel, the Host All Channels Interrupt - * register interrupts the application using the Host Channels Interrupt bit of - * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt. - * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in - * this register are set and cleared when the application sets and clears bits - * in the corresponding Host Channel-n Interrupt register. - */ -union cvmx_usbcx_haint { - u32 u32; - /** - * struct cvmx_usbcx_haint_s - * @haint: Channel Interrupts (HAINT) - * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15 - */ - struct cvmx_usbcx_haint_s { - __BITFIELD_FIELD(u32 reserved_16_31 : 16, - __BITFIELD_FIELD(u32 haint : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_haintmsk - * - * Host All Channels Interrupt Mask Register (HAINTMSK) - * - * The Host All Channel Interrupt Mask register works with the Host All Channel - * Interrupt register to interrupt the application when an event occurs on a - * channel. There is one interrupt mask bit per channel, up to a maximum of 16 - * bits. 
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 - */ -union cvmx_usbcx_haintmsk { - u32 u32; - /** - * struct cvmx_usbcx_haintmsk_s - * @haintmsk: Channel Interrupt Mask (HAINTMsk) - * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 - */ - struct cvmx_usbcx_haintmsk_s { - __BITFIELD_FIELD(u32 reserved_16_31 : 16, - __BITFIELD_FIELD(u32 haintmsk : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hcchar# - * - * Host Channel-n Characteristics Register (HCCHAR) - * - */ -union cvmx_usbcx_hccharx { - u32 u32; - /** - * struct cvmx_usbcx_hccharx_s - * @chena: Channel Enable (ChEna) - * This field is set by the application and cleared by the OTG - * host. - * * 1'b0: Channel disabled - * * 1'b1: Channel enabled - * @chdis: Channel Disable (ChDis) - * The application sets this bit to stop transmitting/receiving - * data on a channel, even before the transfer for that channel is - * complete. The application must wait for the Channel Disabled - * interrupt before treating the channel as disabled. - * @oddfrm: Odd Frame (OddFrm) - * This field is set (reset) by the application to indicate that - * the OTG host must perform a transfer in an odd (micro)frame. - * This field is applicable for only periodic (isochronous and - * interrupt) transactions. - * * 1'b0: Even (micro)frame - * * 1'b1: Odd (micro)frame - * @devaddr: Device Address (DevAddr) - * This field selects the specific device serving as the data - * source or sink. - * @ec: Multi Count (MC) / Error Count (EC) - * When the Split Enable bit of the Host Channel-n Split Control - * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates - * to the host the number of transactions that should be executed - * per microframe for this endpoint. - * * 2'b00: Reserved. This field yields undefined results. - * * 2'b01: 1 transaction - * * 2'b10: 2 transactions to be issued for this endpoint per - * microframe - * * 2'b11: 3 transactions to be issued for this endpoint per - * microframe - * When HCSPLTn.SpltEna is set (1'b1), this field indicates the - * number of immediate retries to be performed for a periodic split - * transactions on transaction errors. This field must be set to at - * least 2'b01. - * @eptype: Endpoint Type (EPType) - * Indicates the transfer type selected. - * * 2'b00: Control - * * 2'b01: Isochronous - * * 2'b10: Bulk - * * 2'b11: Interrupt - * @lspddev: Low-Speed Device (LSpdDev) - * This field is set by the application to indicate that this - * channel is communicating to a low-speed device. - * @epdir: Endpoint Direction (EPDir) - * Indicates whether the transaction is IN or OUT. - * * 1'b0: OUT - * * 1'b1: IN - * @epnum: Endpoint Number (EPNum) - * Indicates the endpoint number on the device serving as the - * data source or sink. - * @mps: Maximum Packet Size (MPS) - * Indicates the maximum packet size of the associated endpoint. - */ - struct cvmx_usbcx_hccharx_s { - __BITFIELD_FIELD(u32 chena : 1, - __BITFIELD_FIELD(u32 chdis : 1, - __BITFIELD_FIELD(u32 oddfrm : 1, - __BITFIELD_FIELD(u32 devaddr : 7, - __BITFIELD_FIELD(u32 ec : 2, - __BITFIELD_FIELD(u32 eptype : 2, - __BITFIELD_FIELD(u32 lspddev : 1, - __BITFIELD_FIELD(u32 reserved_16_16 : 1, - __BITFIELD_FIELD(u32 epdir : 1, - __BITFIELD_FIELD(u32 epnum : 4, - __BITFIELD_FIELD(u32 mps : 11, - ;))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcfg - * - * Host Configuration Register (HCFG) - * - * This register configures the core after power-on. Do not make changes to this - * register after initializing the host. 
- */ -union cvmx_usbcx_hcfg { - u32 u32; - /** - * struct cvmx_usbcx_hcfg_s - * @fslssupp: FS- and LS-Only Support (FSLSSupp) - * The application uses this bit to control the core's enumeration - * speed. Using this bit, the application can make the core - * enumerate as a FS host, even if the connected device supports - * HS traffic. Do not make changes to this field after initial - * programming. - * * 1'b0: HS/FS/LS, based on the maximum speed supported by - * the connected device - * * 1'b1: FS/LS-only, even if the connected device can support HS - * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel) - * When the core is in FS Host mode - * * 2'b00: PHY clock is running at 30/60 MHz - * * 2'b01: PHY clock is running at 48 MHz - * * Others: Reserved - * When the core is in LS Host mode - * * 2'b00: PHY clock is running at 30/60 MHz. When the - * UTMI+/ULPI PHY Low Power mode is not selected, use - * 30/60 MHz. - * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+ - * PHY Low Power mode is selected, use 48MHz if the PHY - * supplies a 48 MHz clock during LS mode. - * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode, - * use 6 MHz when the UTMI+ PHY Low Power mode is - * selected and the PHY supplies a 6 MHz clock during LS - * mode. If you select a 6 MHz clock during LS mode, you must - * do a soft reset. - * * 2'b11: Reserved - */ - struct cvmx_usbcx_hcfg_s { - __BITFIELD_FIELD(u32 reserved_3_31 : 29, - __BITFIELD_FIELD(u32 fslssupp : 1, - __BITFIELD_FIELD(u32 fslspclksel : 2, - ;))) - } s; -}; - -/** - * cvmx_usbc#_hcint# - * - * Host Channel-n Interrupt Register (HCINT) - * - * This register indicates the status of a channel with respect to USB- and - * AHB-related events. The application must read this register when the Host - * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is - * set. Before the application can read this register, it must first read - * the Host All Channels Interrupt (HAINT) register to get the exact channel - * number for the Host Channel-n Interrupt register. The application must clear - * the appropriate bit in this register to clear the corresponding bits in the - * HAINT and GINTSTS registers. - */ -union cvmx_usbcx_hcintx { - u32 u32; - /** - * struct cvmx_usbcx_hcintx_s - * @datatglerr: Data Toggle Error (DataTglErr) - * @frmovrun: Frame Overrun (FrmOvrun) - * @bblerr: Babble Error (BblErr) - * @xacterr: Transaction Error (XactErr) - * @nyet: NYET Response Received Interrupt (NYET) - * @ack: ACK Response Received Interrupt (ACK) - * @nak: NAK Response Received Interrupt (NAK) - * @stall: STALL Response Received Interrupt (STALL) - * @ahberr: This bit is always 0x0. - * @chhltd: Channel Halted (ChHltd) - * Indicates the transfer completed abnormally either because of - * any USB transaction error or in response to disable request by - * the application. - * @xfercompl: Transfer Completed (XferCompl) - * Transfer completed normally without any errors. 
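The HCINT field list above doubles as a decode table for a halted channel. A short sketch of how those bits could be classified once the channel halts, assuming the cvmx_usbcx_hcintx union defined just below (the outcome codes are illustrative, not the removed driver's own status values):

enum channel_outcome { CHAN_PENDING, CHAN_OK, CHAN_STALL, CHAN_ERROR };

static enum channel_outcome classify_hcint(union cvmx_usbcx_hcintx hcint)
{
	if (!hcint.s.chhltd)
		return CHAN_PENDING;	/* channel has not halted yet */

	if (hcint.s.xfercompl)
		return CHAN_OK;		/* halted after a normal completion */

	if (hcint.s.stall)
		return CHAN_STALL;	/* endpoint answered with STALL */

	if (hcint.s.bblerr || hcint.s.xacterr ||
	    hcint.s.frmovrun || hcint.s.datatglerr)
		return CHAN_ERROR;	/* USB-level transaction error */

	return CHAN_ERROR;		/* halted for another reason, e.g. a disable request */
}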
- */ - struct cvmx_usbcx_hcintx_s { - __BITFIELD_FIELD(u32 reserved_11_31 : 21, - __BITFIELD_FIELD(u32 datatglerr : 1, - __BITFIELD_FIELD(u32 frmovrun : 1, - __BITFIELD_FIELD(u32 bblerr : 1, - __BITFIELD_FIELD(u32 xacterr : 1, - __BITFIELD_FIELD(u32 nyet : 1, - __BITFIELD_FIELD(u32 ack : 1, - __BITFIELD_FIELD(u32 nak : 1, - __BITFIELD_FIELD(u32 stall : 1, - __BITFIELD_FIELD(u32 ahberr : 1, - __BITFIELD_FIELD(u32 chhltd : 1, - __BITFIELD_FIELD(u32 xfercompl : 1, - ;)))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcintmsk# - * - * Host Channel-n Interrupt Mask Register (HCINTMSKn) - * - * This register reflects the mask for each channel status described in the - * previous section. - * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 - */ -union cvmx_usbcx_hcintmskx { - u32 u32; - /** - * struct cvmx_usbcx_hcintmskx_s - * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk) - * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk) - * @bblerrmsk: Babble Error Mask (BblErrMsk) - * @xacterrmsk: Transaction Error Mask (XactErrMsk) - * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk) - * @ackmsk: ACK Response Received Interrupt Mask (AckMsk) - * @nakmsk: NAK Response Received Interrupt Mask (NakMsk) - * @stallmsk: STALL Response Received Interrupt Mask (StallMsk) - * @ahberrmsk: AHB Error Mask (AHBErrMsk) - * @chhltdmsk: Channel Halted Mask (ChHltdMsk) - * @xfercomplmsk: Transfer Completed Mask (XferComplMsk) - */ - struct cvmx_usbcx_hcintmskx_s { - __BITFIELD_FIELD(u32 reserved_11_31 : 21, - __BITFIELD_FIELD(u32 datatglerrmsk : 1, - __BITFIELD_FIELD(u32 frmovrunmsk : 1, - __BITFIELD_FIELD(u32 bblerrmsk : 1, - __BITFIELD_FIELD(u32 xacterrmsk : 1, - __BITFIELD_FIELD(u32 nyetmsk : 1, - __BITFIELD_FIELD(u32 ackmsk : 1, - __BITFIELD_FIELD(u32 nakmsk : 1, - __BITFIELD_FIELD(u32 stallmsk : 1, - __BITFIELD_FIELD(u32 ahberrmsk : 1, - __BITFIELD_FIELD(u32 chhltdmsk : 1, - __BITFIELD_FIELD(u32 xfercomplmsk : 1, - ;)))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcsplt# - * - * Host Channel-n Split Control Register (HCSPLT) - * - */ -union cvmx_usbcx_hcspltx { - u32 u32; - /** - * struct cvmx_usbcx_hcspltx_s - * @spltena: Split Enable (SpltEna) - * The application sets this field to indicate that this channel is - * enabled to perform split transactions. - * @compsplt: Do Complete Split (CompSplt) - * The application sets this field to request the OTG host to - * perform a complete split transaction. - * @xactpos: Transaction Position (XactPos) - * This field is used to determine whether to send all, first, - * middle, or last payloads with each OUT transaction. - * * 2'b11: All. This is the entire data payload is of this - * transaction (which is less than or equal to 188 bytes). - * * 2'b10: Begin. This is the first data payload of this - * transaction (which is larger than 188 bytes). - * * 2'b00: Mid. This is the middle payload of this transaction - * (which is larger than 188 bytes). - * * 2'b01: End. This is the last payload of this transaction - * (which is larger than 188 bytes). - * @hubaddr: Hub Address (HubAddr) - * This field holds the device address of the transaction - * translator's hub. - * @prtaddr: Port Address (PrtAddr) - * This field is the port number of the recipient transaction - * translator. 
- */ - struct cvmx_usbcx_hcspltx_s { - __BITFIELD_FIELD(u32 spltena : 1, - __BITFIELD_FIELD(u32 reserved_17_30 : 14, - __BITFIELD_FIELD(u32 compsplt : 1, - __BITFIELD_FIELD(u32 xactpos : 2, - __BITFIELD_FIELD(u32 hubaddr : 7, - __BITFIELD_FIELD(u32 prtaddr : 7, - ;)))))) - } s; -}; - -/** - * cvmx_usbc#_hctsiz# - * - * Host Channel-n Transfer Size Register (HCTSIZ) - * - */ -union cvmx_usbcx_hctsizx { - u32 u32; - /** - * struct cvmx_usbcx_hctsizx_s - * @dopng: Do Ping (DoPng) - * Setting this field to 1 directs the host to do PING protocol. - * @pid: PID (Pid) - * The application programs this field with the type of PID to use - * for the initial transaction. The host will maintain this field - * for the rest of the transfer. - * * 2'b00: DATA0 - * * 2'b01: DATA2 - * * 2'b10: DATA1 - * * 2'b11: MDATA (non-control)/SETUP (control) - * @pktcnt: Packet Count (PktCnt) - * This field is programmed by the application with the expected - * number of packets to be transmitted (OUT) or received (IN). - * The host decrements this count on every successful - * transmission or reception of an OUT/IN packet. Once this count - * reaches zero, the application is interrupted to indicate normal - * completion. - * @xfersize: Transfer Size (XferSize) - * For an OUT, this field is the number of data bytes the host will - * send during the transfer. - * For an IN, this field is the buffer size that the application - * has reserved for the transfer. The application is expected to - * program this field as an integer multiple of the maximum packet - * size for IN transactions (periodic and non-periodic). - */ - struct cvmx_usbcx_hctsizx_s { - __BITFIELD_FIELD(u32 dopng : 1, - __BITFIELD_FIELD(u32 pid : 2, - __BITFIELD_FIELD(u32 pktcnt : 10, - __BITFIELD_FIELD(u32 xfersize : 19, - ;)))) - } s; -}; - -/** - * cvmx_usbc#_hfir - * - * Host Frame Interval Register (HFIR) - * - * This register stores the frame interval information for the current speed to - * which the O2P USB core has enumerated. - */ -union cvmx_usbcx_hfir { - u32 u32; - /** - * struct cvmx_usbcx_hfir_s - * @frint: Frame Interval (FrInt) - * The value that the application programs to this field specifies - * the interval between two consecutive SOFs (FS) or micro- - * SOFs (HS) or Keep-Alive tokens (HS). This field contains the - * number of PHY clocks that constitute the required frame - * interval. The default value set in this field for a FS operation - * when the PHY clock frequency is 60 MHz. The application can - * write a value to this register only after the Port Enable bit of - * the Host Port Control and Status register (HPRT.PrtEnaPort) - * has been set. If no value is programmed, the core calculates - * the value based on the PHY clock specified in the FS/LS PHY - * Clock Select field of the Host Configuration register - * (HCFG.FSLSPclkSel). Do not change the value of this field - * after the initial configuration. - * * 125 us (PHY clock frequency for HS) - * * 1 ms (PHY clock frequency for FS/LS) - */ - struct cvmx_usbcx_hfir_s { - __BITFIELD_FIELD(u32 reserved_16_31 : 16, - __BITFIELD_FIELD(u32 frint : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hfnum - * - * Host Frame Number/Frame Time Remaining Register (HFNUM) - * - * This register indicates the current frame number. - * It also indicates the time remaining (in terms of the number of PHY clocks) - * in the current (micro)frame. 
- */ -union cvmx_usbcx_hfnum { - u32 u32; - /** - * struct cvmx_usbcx_hfnum_s - * @frrem: Frame Time Remaining (FrRem) - * Indicates the amount of time remaining in the current - * microframe (HS) or frame (FS/LS), in terms of PHY clocks. - * This field decrements on each PHY clock. When it reaches - * zero, this field is reloaded with the value in the Frame - * Interval register and a new SOF is transmitted on the USB. - * @frnum: Frame Number (FrNum) - * This field increments when a new SOF is transmitted on the - * USB, and is reset to 0 when it reaches 16'h3FFF. - */ - struct cvmx_usbcx_hfnum_s { - __BITFIELD_FIELD(u32 frrem : 16, - __BITFIELD_FIELD(u32 frnum : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hprt - * - * Host Port Control and Status Register (HPRT) - * - * This register is available in both Host and Device modes. - * Currently, the OTG Host supports only one port. - * A single register holds USB port-related information such as USB reset, - * enable, suspend, resume, connect status, and test mode for each port. The - * R_SS_WC bits in this register can trigger an interrupt to the application - * through the Host Port Interrupt bit of the Core Interrupt register - * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this - * register and clear the bit that caused the interrupt. For the R_SS_WC bits, - * the application must write a 1 to the bit to clear the interrupt. - */ -union cvmx_usbcx_hprt { - u32 u32; - /** - * struct cvmx_usbcx_hprt_s - * @prtspd: Port Speed (PrtSpd) - * Indicates the speed of the device attached to this port. - * * 2'b00: High speed - * * 2'b01: Full speed - * * 2'b10: Low speed - * * 2'b11: Reserved - * @prttstctl: Port Test Control (PrtTstCtl) - * The application writes a nonzero value to this field to put - * the port into a Test mode, and the corresponding pattern is - * signaled on the port. - * * 4'b0000: Test mode disabled - * * 4'b0001: Test_J mode - * * 4'b0010: Test_K mode - * * 4'b0011: Test_SE0_NAK mode - * * 4'b0100: Test_Packet mode - * * 4'b0101: Test_Force_Enable - * * Others: Reserved - * PrtSpd must be zero (i.e. the interface must be in high-speed - * mode) to use the PrtTstCtl test modes. - * @prtpwr: Port Power (PrtPwr) - * The application uses this field to control power to this port, - * and the core clears this bit on an overcurrent condition. - * * 1'b0: Power off - * * 1'b1: Power on - * @prtlnsts: Port Line Status (PrtLnSts) - * Indicates the current logic level USB data lines - * * Bit [10]: Logic level of D- - * * Bit [11]: Logic level of D+ - * @prtrst: Port Reset (PrtRst) - * When the application sets this bit, a reset sequence is - * started on this port. The application must time the reset - * period and clear this bit after the reset sequence is - * complete. - * * 1'b0: Port not in reset - * * 1'b1: Port in reset - * The application must leave this bit set for at least a - * minimum duration mentioned below to start a reset on the - * port. The application can leave it set for another 10 ms in - * addition to the required minimum duration, before clearing - * the bit, even though there is no maximum limit set by the - * USB standard. - * * High speed: 50 ms - * * Full speed/Low speed: 10 ms - * @prtsusp: Port Suspend (PrtSusp) - * The application sets this bit to put this port in Suspend - * mode. The core only stops sending SOFs when this is set. - * To stop the PHY clock, the application must set the Port - * Clock Stop bit, which will assert the suspend input pin of - * the PHY. 
- * The read value of this bit reflects the current suspend - * status of the port. This bit is cleared by the core after a - * remote wakeup signal is detected or the application sets - * the Port Reset bit or Port Resume bit in this register or the - * Resume/Remote Wakeup Detected Interrupt bit or - * Disconnect Detected Interrupt bit in the Core Interrupt - * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt, - * respectively). - * * 1'b0: Port not in Suspend mode - * * 1'b1: Port in Suspend mode - * @prtres: Port Resume (PrtRes) - * The application sets this bit to drive resume signaling on - * the port. The core continues to drive the resume signal - * until the application clears this bit. - * If the core detects a USB remote wakeup sequence, as - * indicated by the Port Resume/Remote Wakeup Detected - * Interrupt bit of the Core Interrupt register - * (GINTSTS.WkUpInt), the core starts driving resume - * signaling without application intervention and clears this bit - * when it detects a disconnect condition. The read value of - * this bit indicates whether the core is currently driving - * resume signaling. - * * 1'b0: No resume driven - * * 1'b1: Resume driven - * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng) - * The core sets this bit when the status of the Port - * Overcurrent Active bit (bit 4) in this register changes. - * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct) - * Indicates the overcurrent condition of the port. - * * 1'b0: No overcurrent condition - * * 1'b1: Overcurrent condition - * @prtenchng: Port Enable/Disable Change (PrtEnChng) - * The core sets this bit when the status of the Port Enable bit - * [2] of this register changes. - * @prtena: Port Enable (PrtEna) - * A port is enabled only by the core after a reset sequence, - * and is disabled by an overcurrent condition, a disconnect - * condition, or by the application clearing this bit. The - * application cannot set this bit by a register write. It can only - * clear it to disable the port. This bit does not trigger any - * interrupt to the application. - * * 1'b0: Port disabled - * * 1'b1: Port enabled - * @prtconndet: Port Connect Detected (PrtConnDet) - * The core sets this bit when a device connection is detected - * to trigger an interrupt to the application using the Host Port - * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt). - * The application must write a 1 to this bit to clear the - * interrupt. - * @prtconnsts: Port Connect Status (PrtConnSts) - * * 0: No device is attached to the port. - * * 1: A device is attached to the port. - */ - struct cvmx_usbcx_hprt_s { - __BITFIELD_FIELD(u32 reserved_19_31 : 13, - __BITFIELD_FIELD(u32 prtspd : 2, - __BITFIELD_FIELD(u32 prttstctl : 4, - __BITFIELD_FIELD(u32 prtpwr : 1, - __BITFIELD_FIELD(u32 prtlnsts : 2, - __BITFIELD_FIELD(u32 reserved_9_9 : 1, - __BITFIELD_FIELD(u32 prtrst : 1, - __BITFIELD_FIELD(u32 prtsusp : 1, - __BITFIELD_FIELD(u32 prtres : 1, - __BITFIELD_FIELD(u32 prtovrcurrchng : 1, - __BITFIELD_FIELD(u32 prtovrcurract : 1, - __BITFIELD_FIELD(u32 prtenchng : 1, - __BITFIELD_FIELD(u32 prtena : 1, - __BITFIELD_FIELD(u32 prtconndet : 1, - __BITFIELD_FIELD(u32 prtconnsts : 1, - ;))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hptxfsiz - * - * Host Periodic Transmit FIFO Size Register (HPTXFSIZ) - * - * This register holds the size and the memory start address of the Periodic - * TxFIFO, as shown in Figures 310 and 311. 
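The HPRT description above mixes ordinary read/write controls with R_SS_WC (write-1-to-clear) status bits, plus a prtena bit that software can only clear. A common defensive pattern when toggling a control bit such as prtrst is sketched below; the octeon_hcd context argument and the usb_read_csr32()/usb_write_csr32() accessors are hypothetical stand-ins, not the driver's actual helpers:

/* Sketch, not driver code. */
static void usb_port_start_reset(struct octeon_hcd *usb, u64 hprt_address)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = usb_read_csr32(usb, hprt_address);

	/* Never write 1 back to the write-1-to-clear change bits, and never
	 * write 1 to prtena (that disables the port); otherwise a pending
	 * connect/enable/overcurrent change event is silently discarded. */
	hprt.s.prtena = 0;
	hprt.s.prtconndet = 0;
	hprt.s.prtenchng = 0;
	hprt.s.prtovrcurrchng = 0;

	hprt.s.prtrst = 1;	/* start the reset sequence */
	usb_write_csr32(usb, hprt_address, hprt.u32);

	/* Per the text above, keep prtrst set for >= 50 ms (HS) or >= 10 ms
	 * (FS/LS), then clear it with the same masking applied again. */
}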
- */ -union cvmx_usbcx_hptxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_hptxfsiz_s - * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize) - * This value is in terms of 32-bit words. - * * Minimum value is 16 - * * Maximum value is 32768 - * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr) - */ - struct cvmx_usbcx_hptxfsiz_s { - __BITFIELD_FIELD(u32 ptxfsize : 16, - __BITFIELD_FIELD(u32 ptxfstaddr : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hptxsts - * - * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS) - * - * This read-only register contains the free space information for the Periodic - * TxFIFO and the Periodic Transmit Request Queue - */ -union cvmx_usbcx_hptxsts { - u32 u32; - /** - * struct cvmx_usbcx_hptxsts_s - * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop) - * This indicates the entry in the Periodic Tx Request Queue that - * is currently being processes by the MAC. - * This register is used for debugging. - * * Bit [31]: Odd/Even (micro)frame - * - 1'b0: send in even (micro)frame - * - 1'b1: send in odd (micro)frame - * * Bits [30:27]: Channel/endpoint number - * * Bits [26:25]: Type - * - 2'b00: IN/OUT - * - 2'b01: Zero-length packet - * - 2'b10: CSPLIT - * - 2'b11: Disable channel command - * * Bit [24]: Terminate (last entry for the selected - * channel/endpoint) - * @ptxqspcavail: Periodic Transmit Request Queue Space Available - * (PTxQSpcAvail) - * Indicates the number of free locations available to be written - * in the Periodic Transmit Request Queue. This queue holds both - * IN and OUT requests. - * * 8'h0: Periodic Transmit Request Queue is full - * * 8'h1: 1 location available - * * 8'h2: 2 locations available - * * n: n locations available (0..8) - * * Others: Reserved - * @ptxfspcavail: Periodic Transmit Data FIFO Space Available - * (PTxFSpcAvail) - * Indicates the number of free locations available to be written - * to in the Periodic TxFIFO. - * Values are in terms of 32-bit words - * * 16'h0: Periodic TxFIFO is full - * * 16'h1: 1 word available - * * 16'h2: 2 words available - * * 16'hn: n words available (where 0..32768) - * * 16'h8000: 32768 words available - * * Others: Reserved - */ - struct cvmx_usbcx_hptxsts_s { - __BITFIELD_FIELD(u32 ptxqtop : 8, - __BITFIELD_FIELD(u32 ptxqspcavail : 8, - __BITFIELD_FIELD(u32 ptxfspcavail : 16, - ;))) - } s; -}; - -/** - * cvmx_usbn#_clk_ctl - * - * USBN_CLK_CTL = USBN's Clock Control - * - * This register is used to control the frequency of the hclk and the - * hreset and phy_rst signals. - */ -union cvmx_usbnx_clk_ctl { - u64 u64; - /** - * struct cvmx_usbnx_clk_ctl_s - * @divide2: The 'hclk' used by the USB subsystem is derived - * from the eclk. - * Also see the field DIVIDE. DIVIDE2<1> must currently - * be zero because it is not implemented, so the maximum - * ratio of eclk/hclk is currently 16. - * The actual divide number for hclk is: - * (DIVIDE2 + 1) * (DIVIDE + 1) - * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to - * generate the hclk in the USB Subsystem is held - * in reset. This bit must be set to '0' before - * changing the value os DIVIDE in this register. - * The reset to the HCLK_DIVIDERis also asserted - * when core reset is asserted. - * @p_x_on: Force USB-PHY on during suspend. - * '1' USB-PHY XO block is powered-down during - * suspend. - * '0' USB-PHY XO block is powered-up during - * suspend. - * The value of this field must be set while POR is - * active. 
- * @p_rtype: PHY reference clock type - * On CN50XX/CN52XX/CN56XX the values are: - * '0' The USB-PHY uses a 12MHz crystal as a clock source - * at the USB_XO and USB_XI pins. - * '1' Reserved. - * '2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the - * USB_XO pin. USB_XI should be tied to ground in this - * case. - * '3' Reserved. - * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are: - * '0' Reserved. - * '1' Reserved. - * '2' The PHY PLL uses the XO block output as a reference. - * The XO block uses an external clock supplied on the - * XO pin. USB_XI should be tied to ground for this - * usage. - * '3' The XO block uses the clock from a crystal. - * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to - * remain powered in Suspend Mode. - * '1' The USB-PHY XO Bias, Bandgap and PLL are - * powered down in suspend mode. - * The value of this field must be set while POR is - * active. - * @p_c_sel: Phy clock speed select. - * Selects the reference clock / crystal frequency. - * '11': Reserved - * '10': 48 MHz (reserved when a crystal is used) - * '01': 24 MHz (reserved when a crystal is used) - * '00': 12 MHz - * The value of this field must be set while POR is - * active. - * NOTE: if a crystal is used as a reference clock, - * this field must be set to 12 MHz. - * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV. - * @sd_mode: Scaledown mode for the USBC. Control timing events - * in the USBC, for normal operation this must be '0'. - * @s_bist: Starts bist on the hclk memories, during the '0' - * to '1' transition. - * @por: Power On Reset for the PHY. - * Resets all the PHYS registers and state machines. - * @enable: When '1' allows the generation of the hclk. When - * '0' the hclk will not be generated. SEE DIVIDE - * field of this register. - * @prst: When this field is '0' the reset associated with - * the phy_clk functionality in the USB Subsystem is - * help in reset. This bit should not be set to '1' - * until the time it takes 6 clocks (hclk or phy_clk, - * whichever is slower) has passed. Under normal - * operation once this bit is set to '1' it should not - * be set to '0'. - * @hrst: When this field is '0' the reset associated with - * the hclk functioanlity in the USB Subsystem is - * held in reset.This bit should not be set to '1' - * until 12ms after phy_clk is stable. Under normal - * operation, once this bit is set to '1' it should - * not be set to '0'. - * @divide: The frequency of 'hclk' used by the USB subsystem - * is the eclk frequency divided by the value of - * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field - * DIVIDE2 of this register. - * The hclk frequency should be less than 125Mhz. - * After writing a value to this field the SW should - * read the field for the value written. - * The ENABLE field of this register should not be set - * until AFTER this field is set and then read. 
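The DIVIDE/DIVIDE2 text above prescribes an ordering: hold the divider in reset (HCLK_RST = 0) while DIVIDE changes, read the field back, and only then allow ENABLE to be set, keeping hclk below 125 MHz. One plausible rendering of that sequence, using the union layout that follows; usbn_read_csr64()/usbn_write_csr64() and the octeon_hcd context are hypothetical:

/* Sketch of the bring-up order described above. */
static void usbn_set_hclk_divider(struct octeon_hcd *usb, u64 clk_ctl_address,
				  u64 eclk_hz)
{
	union cvmx_usbnx_clk_ctl ctl;
	u64 divide = 0;

	/* hclk = eclk / ((DIVIDE2 + 1) * (DIVIDE + 1)) and must stay below
	 * 125 MHz; DIVIDE2 is left at 0, so pick the smallest DIVIDE that fits. */
	while (divide < 7 && eclk_hz / (divide + 1) >= 125000000ULL)
		divide++;

	ctl.u64 = usbn_read_csr64(usb, clk_ctl_address);
	ctl.s.enable = 0;
	ctl.s.hclk_rst = 0;	/* hold the divider in reset while DIVIDE changes */
	usbn_write_csr64(usb, clk_ctl_address, ctl.u64);

	ctl.s.divide = divide;
	ctl.s.divide2 = 0;
	usbn_write_csr64(usb, clk_ctl_address, ctl.u64);

	/* Read DIVIDE back before touching ENABLE, as required above. */
	ctl.u64 = usbn_read_csr64(usb, clk_ctl_address);
	ctl.s.hclk_rst = 1;	/* release the divider reset */
	usbn_write_csr64(usb, clk_ctl_address, ctl.u64);
	ctl.s.enable = 1;
	usbn_write_csr64(usb, clk_ctl_address, ctl.u64);
}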
- */ - struct cvmx_usbnx_clk_ctl_s { - __BITFIELD_FIELD(u64 reserved_20_63 : 44, - __BITFIELD_FIELD(u64 divide2 : 2, - __BITFIELD_FIELD(u64 hclk_rst : 1, - __BITFIELD_FIELD(u64 p_x_on : 1, - __BITFIELD_FIELD(u64 p_rtype : 2, - __BITFIELD_FIELD(u64 p_com_on : 1, - __BITFIELD_FIELD(u64 p_c_sel : 2, - __BITFIELD_FIELD(u64 cdiv_byp : 1, - __BITFIELD_FIELD(u64 sd_mode : 2, - __BITFIELD_FIELD(u64 s_bist : 1, - __BITFIELD_FIELD(u64 por : 1, - __BITFIELD_FIELD(u64 enable : 1, - __BITFIELD_FIELD(u64 prst : 1, - __BITFIELD_FIELD(u64 hrst : 1, - __BITFIELD_FIELD(u64 divide : 3, - ;))))))))))))))) - } s; -}; - -/** - * cvmx_usbn#_usbp_ctl_status - * - * USBN_USBP_CTL_STATUS = USBP Control And Status Register - * - * Contains general control and status information for the USBN block. - */ -union cvmx_usbnx_usbp_ctl_status { - u64 u64; - /** - * struct cvmx_usbnx_usbp_ctl_status_s - * @txrisetune: HS Transmitter Rise/Fall Time Adjustment - * @txvreftune: HS DC Voltage Level Adjustment - * @txfslstune: FS/LS Source Impedance Adjustment - * @txhsxvtune: Transmitter High-Speed Crossover Adjustment - * @sqrxtune: Squelch Threshold Adjustment - * @compdistune: Disconnect Threshold Adjustment - * @otgtune: VBUS Valid Threshold Adjustment - * @otgdisable: OTG Block Disable - * @portreset: Per_Port Reset - * @drvvbus: Drive VBUS - * @lsbist: Low-Speed BIST Enable. - * @fsbist: Full-Speed BIST Enable. - * @hsbist: High-Speed BIST Enable. - * @bist_done: PHY Bist Done. - * Asserted at the end of the PHY BIST sequence. - * @bist_err: PHY Bist Error. - * Indicates an internal error was detected during - * the BIST sequence. - * @tdata_out: PHY Test Data Out. - * Presents either internally generated signals or - * test register contents, based upon the value of - * test_data_out_sel. - * @siddq: Drives the USBP (USB-PHY) SIDDQ input. - * Normally should be set to zero. - * When customers have no intent to use USB PHY - * interface, they should: - * - still provide 3.3V to USB_VDD33, and - * - tie USB_REXT to 3.3V supply, and - * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 - * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable - * @dma_bmode: When set to 1 the L2C DMA address will be updated - * with byte-counts between packets. When set to 0 - * the L2C DMA address is incremented to the next - * 4-byte aligned address after adding byte-count. - * @usbc_end: Bigendian input to the USB Core. This should be - * set to '0' for operation. - * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP. - * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP. - * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY. - * This signal enables the pull-down resistance on - * the D+ line. '1' pull down-resistance is connected - * to D+/ '0' pull down resistance is not connected - * to D+. When an A/B device is acting as a host - * (downstream-facing port), dp_pulldown and - * dm_pulldown are enabled. This must not toggle - * during normal operation. - * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY. - * This signal enables the pull-down resistance on - * the D- line. '1' pull down-resistance is connected - * to D-. '0' pull down resistance is not connected - * to D-. When an A/B device is acting as a host - * (downstream-facing port), dp_pulldown and - * dm_pulldown are enabled. This must not toggle - * during normal operation. - * @hst_mode: When '0' the USB is acting as HOST, when '1' - * USB is acting as device. This field needs to be - * set while the USB is in reset. 
- * @tuning: Transmitter Tuning for High-Speed Operation. - * Tunes the current supply and rise/fall output - * times for high-speed operation. - * [20:19] == 11: Current supply increased - * approximately 9% - * [20:19] == 10: Current supply increased - * approximately 4.5% - * [20:19] == 01: Design default. - * [20:19] == 00: Current supply decreased - * approximately 4.5% - * [22:21] == 11: Rise and fall times are increased. - * [22:21] == 10: Design default. - * [22:21] == 01: Rise and fall times are decreased. - * [22:21] == 00: Rise and fall times are decreased - * further as compared to the 01 setting. - * @tx_bs_enh: Transmit Bit Stuffing on [15:8]. - * Enables or disables bit stuffing on data[15:8] - * when bit-stuffing is enabled. - * @tx_bs_en: Transmit Bit Stuffing on [7:0]. - * Enables or disables bit stuffing on data[7:0] - * when bit-stuffing is enabled. - * @loop_enb: PHY Loopback Test Enable. - * '1': During data transmission the receive is - * enabled. - * '0': During data transmission the receive is - * disabled. - * Must be '0' for normal operation. - * @vtest_enb: Analog Test Pin Enable. - * '1' The PHY's analog_test pin is enabled for the - * input and output of applicable analog test signals. - * '0' THe analog_test pin is disabled. - * @bist_enb: Built-In Self Test Enable. - * Used to activate BIST in the PHY. - * @tdata_sel: Test Data Out Select. - * '1' test_data_out[3:0] (PHY) register contents - * are output. '0' internally generated signals are - * output. - * @taddr_in: Mode Address for Test Interface. - * Specifies the register address for writing to or - * reading from the PHY test interface register. - * @tdata_in: Internal Testing Register Input Data and Select - * This is a test bus. Data is present on [3:0], - * and its corresponding select (enable) is present - * on bits [7:4]. - * @ate_reset: Reset input from automatic test equipment. - * This is a test signal. When the USB Core is - * powered up (not in Susned Mode), an automatic - * tester can use this to disable phy_clock and - * free_clk, then re-enable them with an aligned - * phase. - * '1': The phy_clk and free_clk outputs are - * disabled. "0": The phy_clock and free_clk outputs - * are available within a specific period after the - * de-assertion. 
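The hst_mode/dp_pulld/dm_pulld notes above amount to a short host-side PHY recipe: select host mode while the core is still in reset and enable both pull-downs for a downstream-facing port. A hedged sketch, reusing the hypothetical CSR accessors from the earlier examples:

/* Sketch; to be called while the USB core is held in reset. */
static void usbp_configure_host_mode(struct octeon_hcd *usb, u64 ctl_address)
{
	union cvmx_usbnx_usbp_ctl_status ctl;

	ctl.u64 = usbn_read_csr64(usb, ctl_address);
	ctl.s.hst_mode = 0;	/* '0' = act as host */
	ctl.s.dp_pulld = 1;	/* downstream-facing port: both pull-downs on */
	ctl.s.dm_pulld = 1;
	usbn_write_csr64(usb, ctl_address, ctl.u64);
}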
- */ - struct cvmx_usbnx_usbp_ctl_status_s { - __BITFIELD_FIELD(u64 txrisetune : 1, - __BITFIELD_FIELD(u64 txvreftune : 4, - __BITFIELD_FIELD(u64 txfslstune : 4, - __BITFIELD_FIELD(u64 txhsxvtune : 2, - __BITFIELD_FIELD(u64 sqrxtune : 3, - __BITFIELD_FIELD(u64 compdistune : 3, - __BITFIELD_FIELD(u64 otgtune : 3, - __BITFIELD_FIELD(u64 otgdisable : 1, - __BITFIELD_FIELD(u64 portreset : 1, - __BITFIELD_FIELD(u64 drvvbus : 1, - __BITFIELD_FIELD(u64 lsbist : 1, - __BITFIELD_FIELD(u64 fsbist : 1, - __BITFIELD_FIELD(u64 hsbist : 1, - __BITFIELD_FIELD(u64 bist_done : 1, - __BITFIELD_FIELD(u64 bist_err : 1, - __BITFIELD_FIELD(u64 tdata_out : 4, - __BITFIELD_FIELD(u64 siddq : 1, - __BITFIELD_FIELD(u64 txpreemphasistune : 1, - __BITFIELD_FIELD(u64 dma_bmode : 1, - __BITFIELD_FIELD(u64 usbc_end : 1, - __BITFIELD_FIELD(u64 usbp_bist : 1, - __BITFIELD_FIELD(u64 tclk : 1, - __BITFIELD_FIELD(u64 dp_pulld : 1, - __BITFIELD_FIELD(u64 dm_pulld : 1, - __BITFIELD_FIELD(u64 hst_mode : 1, - __BITFIELD_FIELD(u64 tuning : 4, - __BITFIELD_FIELD(u64 tx_bs_enh : 1, - __BITFIELD_FIELD(u64 tx_bs_en : 1, - __BITFIELD_FIELD(u64 loop_enb : 1, - __BITFIELD_FIELD(u64 vtest_enb : 1, - __BITFIELD_FIELD(u64 bist_enb : 1, - __BITFIELD_FIELD(u64 tdata_sel : 1, - __BITFIELD_FIELD(u64 taddr_in : 4, - __BITFIELD_FIELD(u64 tdata_in : 8, - __BITFIELD_FIELD(u64 ate_reset : 1, - ;))))))))))))))))))))))))))))))))))) - } s; -}; - -#endif /* __OCTEON_HCD_H__ */ diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 9ebd665e5d42..965330eec80a 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -469,8 +469,8 @@ void cvm_oct_rx_initialize(void) if (!(pow_receive_groups & BIT(i))) continue; - netif_napi_add(dev_for_napi, &oct_rx_group[i].napi, - cvm_oct_napi_poll, rx_napi_weight); + netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi, + cvm_oct_napi_poll, rx_napi_weight); napi_enable(&oct_rx_group[i].napi); oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i; diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 7284cb4ac395..9363c5cfe50f 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -383,7 +383,7 @@ static void dcon_set_source(struct dcon_priv *dcon, int arg) static void dcon_set_source_sync(struct dcon_priv *dcon, int arg) { dcon_set_source(dcon, arg); - flush_scheduled_work(); + flush_work(&dcon->switch_source); } static ssize_t dcon_mode_show(struct device *dev, @@ -517,10 +517,7 @@ static struct device_attribute dcon_device_files[] = { static int dcon_bl_update(struct backlight_device *dev) { struct dcon_priv *dcon = bl_get_data(dev); - u8 level = dev->props.brightness & 0x0F; - - if (dev->props.power != FB_BLANK_UNBLANK) - level = 0; + u8 level = backlight_get_brightness(dev) & 0x0F; if (level != dcon->bl_val) dcon_set_backlight(dcon, level); diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c index 941aaa7eab2e..df02335fdbab 100644 --- a/drivers/staging/pi433/pi433_if.c +++ b/drivers/staging/pi433/pi433_if.c @@ -1406,7 +1406,7 @@ static int __init pi433_init(void) /* * Claim device numbers. Then register a class - * that will key udev/mdev to add/remove /dev nodes. Last, register + * that will key udev/mdev to add/remove /dev nodes. * Last, register the driver which manages those device numbers. 
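The olpc_dcon hunk above replaces the open-coded power check in dcon_bl_update() with backlight_get_brightness(). For readers unfamiliar with the helper, it folds the blank/suspend test into the brightness read; an approximate open-coded equivalent is shown below (sketch only, the authoritative definition lives in include/linux/backlight.h):

static int backlight_get_brightness_sketch(const struct backlight_device *bd)
{
	if (backlight_is_blank(bd))	/* blanked or suspended display */
		return 0;

	return bd->props.brightness;
}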
*/ status = alloc_chrdev_region(&pi433_dev, 0, N_PI433_MINORS, "pi433"); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 113a3efd12e9..ca6b966f5dd3 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1976,7 +1976,7 @@ static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev, vlan_id); } else { /* Non-TCP/UDP large frames that span multiple buffers - * can be processed corrrectly by the split frame logic. + * can be processed correctly by the split frame logic. */ qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, vlan_id); @@ -2461,7 +2461,7 @@ static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_io mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len); mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); + cpu_to_le16(skb_tcp_all_headers(skb)); mac_iocb_ptr->net_trans_offset = cpu_to_le16(skb_network_offset(skb) | skb_transport_offset(skb) @@ -2955,7 +2955,7 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring void __iomem *doorbell_area = qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); int err = 0; - u64 tmp; + u64 dma; __le64 *base_indirect_ptr; int page_entries; @@ -3004,15 +3004,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring FLAGS_LI; /* Load irq delay values */ if (rx_ring->cq_id < qdev->rss_ring_count) { cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq.base_dma; + dma = (u64)rx_ring->lbq.base_dma; base_indirect_ptr = rx_ring->lbq.base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); + + for (page_entries = 0; + page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN); + page_entries++) { + base_indirect_ptr[page_entries] = cpu_to_le64(dma); + dma += DB_PAGE_SIZE; + } cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma); cqicb->lbq_buf_size = cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size)); @@ -3021,15 +3021,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring rx_ring->lbq.next_to_clean = 0; cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq.base_dma; + dma = (u64)rx_ring->sbq.base_dma; base_indirect_ptr = rx_ring->sbq.base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); + + for (page_entries = 0; + page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN); + page_entries++) { + base_indirect_ptr[page_entries] = cpu_to_le64(dma); + dma += DB_PAGE_SIZE; + } cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq.base_indirect_dma); cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); @@ -3041,8 +3041,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring /* Inbound completion handling rx_rings run in * separate NAPI contexts. 
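The qlge_tso() hunk above swaps the skb_transport_offset() + tcp_hdrlen() sum for skb_tcp_all_headers(). The helper is effectively shorthand for the same expression; a sketch of the equivalence (the real definition is in include/linux/tcp.h):

static inline int skb_tcp_all_headers_sketch(const struct sk_buff *skb)
{
	/* total length of MAC + IP + TCP headers for this skb */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}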
*/ - netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix, - 64); + netif_napi_add_weight(qdev->ndev, &rx_ring->napi, + qlge_napi_poll_msix, 64); cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); } else { diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile index 1d7982b618ba..eea16eb7caa0 100644 --- a/drivers/staging/r8188eu/Makefile +++ b/drivers/staging/r8188eu/Makefile @@ -5,7 +5,6 @@ r8188eu-y = \ hal/HalHWImg8188E_RF.o \ hal/HalPhyRf_8188e.o \ hal/HalPwrSeqCmd.o \ - hal/Hal8188EPwrSeq.o \ hal/Hal8188ERateAdaptive.o \ hal/hal_intf.o \ hal/hal_com.o \ diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c index ac6effbecf6d..5bd9dfa57cc5 100644 --- a/drivers/staging/r8188eu/core/rtw_ap.c +++ b/drivers/staging/r8188eu/core/rtw_ap.c @@ -654,18 +654,17 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx) set_tx_beacon_cmd(padapter); } -/* -op_mode -Set to 0 (HT pure) under the following conditions - - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or - - all STAs in the BSS are 20 MHz HT in 20 MHz BSS -Set to 1 (HT non-member protection) if there may be non-HT STAs - in both the primary and the secondary channel -Set to 2 if only HT STAs are associated in BSS, - however and at least one 20 MHz HT STA is associated -Set to 3 (HT mixed mode) when one or more non-HT STAs are associated - (currently non-GF HT station is considered as non-HT STA also) -*/ +/* op_mode + * Set to 0 (HT pure) under the following conditions + * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or + * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS + * Set to 1 (HT non-member protection) if there may be non-HT STAs + * in both the primary and the secondary channel + * Set to 2 if only HT STAs are associated in BSS, + * however and at least one 20 MHz HT STA is associated + * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated + * (currently non-GF HT station is considered as non-HT STA also) + */ static int rtw_ht_operation_update(struct adapter *padapter) { u16 cur_op_mode, new_op_mode; diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c index 06523d91939a..5b6a891b5d67 100644 --- a/drivers/staging/r8188eu/core/rtw_cmd.c +++ b/drivers/staging/r8188eu/core/rtw_cmd.c @@ -898,8 +898,12 @@ static void traffic_status_watchdog(struct adapter *padapter) static void rtl8188e_sreset_xmit_status_check(struct adapter *padapter) { u32 txdma_status; + int res; + + res = rtw_read32(padapter, REG_TXDMA_STATUS, &txdma_status); + if (res) + return; - txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS); if (txdma_status != 0x00) rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status); /* total xmit irp = 4 */ @@ -1177,7 +1181,14 @@ exit: static bool rtw_is_hi_queue_empty(struct adapter *adapter) { - return (rtw_read32(adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0; + int res; + u32 reg; + + res = rtw_read32(adapter, REG_HGQ_INFORMATION, ®); + if (res) + return false; + + return (reg & 0x0000ff00) == 0; } static void rtw_chk_hi_queue_hdl(struct adapter *padapter) diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c index 0e0e60638880..df9534dd25cb 100644 --- a/drivers/staging/r8188eu/core/rtw_efuse.c +++ b/drivers/staging/r8188eu/core/rtw_efuse.c @@ -28,22 +28,35 @@ ReadEFuseByte( u32 value32; u8 readbyte; u16 retry; + int res; /* Write Address */ 
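The r8188eu hunks from here on convert rtw_read8()/rtw_read16()/rtw_read32() callers to the new convention in which the accessor returns an error code and delivers the register value through a pointer. The resulting caller shape is sketched below; REG_EXAMPLE and example_handler() are hypothetical, only the calling convention mirrors the hunks:

static void example_handler(struct adapter *padapter)
{
	u8 val;
	int res;

	res = rtw_read8(padapter, REG_EXAMPLE, &val);
	if (res)
		return;		/* the USB transfer failed; 'val' is not valid */

	rtw_write8(padapter, REG_EXAMPLE, val | BIT(0));
}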
rtw_write8(Adapter, EFUSE_CTRL + 1, (_offset & 0xff)); - readbyte = rtw_read8(Adapter, EFUSE_CTRL + 2); + res = rtw_read8(Adapter, EFUSE_CTRL + 2, &readbyte); + if (res) + return; + rtw_write8(Adapter, EFUSE_CTRL + 2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc)); /* Write bit 32 0 */ - readbyte = rtw_read8(Adapter, EFUSE_CTRL + 3); + res = rtw_read8(Adapter, EFUSE_CTRL + 3, &readbyte); + if (res) + return; + rtw_write8(Adapter, EFUSE_CTRL + 3, (readbyte & 0x7f)); /* Check bit 32 read-ready */ - retry = 0; - value32 = rtw_read32(Adapter, EFUSE_CTRL); - while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) { - value32 = rtw_read32(Adapter, EFUSE_CTRL); - retry++; + res = rtw_read32(Adapter, EFUSE_CTRL, &value32); + if (res) + return; + + for (retry = 0; retry < 10000; retry++) { + res = rtw_read32(Adapter, EFUSE_CTRL, &value32); + if (res) + continue; + + if (((value32 >> 24) & 0xff) & 0x80) + break; } /* 20100205 Joseph: Add delay suggested by SD1 Victor. */ @@ -51,37 +64,11 @@ ReadEFuseByte( /* Designer says that there shall be some delay after ready bit is set, or the */ /* result will always stay on last data we read. */ udelay(50); - value32 = rtw_read32(Adapter, EFUSE_CTRL); + res = rtw_read32(Adapter, EFUSE_CTRL, &value32); + if (res) + return; *pbuf = (u8)(value32 & 0xff); -} - -/*----------------------------------------------------------------------------- - * Function: EFUSE_ShadowMapUpdate - * - * Overview: Transfer current EFUSE content to shadow init and modify map. - * - * Input: NONE - * - * Output: NONE - * - * Return: NONE - * - * Revised History: - * When Who Remark - * 11/13/2008 MHC Create Version 0. - * - *---------------------------------------------------------------------------*/ -void EFUSE_ShadowMapUpdate(struct adapter *pAdapter) -{ - struct eeprom_priv *pEEPROM = &pAdapter->eeprompriv; - - if (pEEPROM->bautoload_fail_flag) { - memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E); - return; - } - rtl8188e_EfusePowerSwitch(pAdapter, true); - rtl8188e_ReadEFuse(pAdapter, 0, EFUSE_MAP_LEN_88E, pEEPROM->efuse_eeprom_data); - rtl8188e_EfusePowerSwitch(pAdapter, false); + /* FIXME: return an error to caller */ } diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c index 0451e5177644..95534f9c7a0f 100644 --- a/drivers/staging/r8188eu/core/rtw_fw.c +++ b/drivers/staging/r8188eu/core/rtw_fw.c @@ -44,18 +44,28 @@ static_assert(sizeof(struct rt_firmware_hdr) == 32); static void fw_download_enable(struct adapter *padapter, bool enable) { u8 tmp; + int res; if (enable) { /* MCU firmware download enable. */ - tmp = rtw_read8(padapter, REG_MCUFWDL); + res = rtw_read8(padapter, REG_MCUFWDL, &tmp); + if (res) + return; + rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01); /* 8051 reset */ - tmp = rtw_read8(padapter, REG_MCUFWDL + 2); + res = rtw_read8(padapter, REG_MCUFWDL + 2, &tmp); + if (res) + return; + rtw_write8(padapter, REG_MCUFWDL + 2, tmp & 0xf7); } else { /* MCU firmware download disable. */ - tmp = rtw_read8(padapter, REG_MCUFWDL); + res = rtw_read8(padapter, REG_MCUFWDL, &tmp); + if (res) + return; + rtw_write8(padapter, REG_MCUFWDL, tmp & 0xfe); /* Reserved for fw extension. 
*/ @@ -125,8 +135,13 @@ static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size) { u8 value8; u8 u8Page = (u8)(page & 0x07); + int res; + + res = rtw_read8(padapter, REG_MCUFWDL + 2, &value8); + if (res) + return _FAIL; - value8 = (rtw_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page; + value8 = (value8 & 0xF8) | u8Page; rtw_write8(padapter, REG_MCUFWDL + 2, value8); return block_write(padapter, buffer, size); @@ -165,8 +180,12 @@ exit: void rtw_reset_8051(struct adapter *padapter) { u8 val8; + int res; + + res = rtw_read8(padapter, REG_SYS_FUNC_EN + 1, &val8); + if (res) + return; - val8 = rtw_read8(padapter, REG_SYS_FUNC_EN + 1); rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 & (~BIT(2))); rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 | (BIT(2))); } @@ -175,10 +194,14 @@ static int fw_free_to_go(struct adapter *padapter) { u32 counter = 0; u32 value32; + int res; /* polling CheckSum report */ do { - value32 = rtw_read32(padapter, REG_MCUFWDL); + res = rtw_read32(padapter, REG_MCUFWDL, &value32); + if (res) + continue; + if (value32 & FWDL_CHKSUM_RPT) break; } while (counter++ < POLLING_READY_TIMEOUT_COUNT); @@ -186,7 +209,10 @@ static int fw_free_to_go(struct adapter *padapter) if (counter >= POLLING_READY_TIMEOUT_COUNT) return _FAIL; - value32 = rtw_read32(padapter, REG_MCUFWDL); + res = rtw_read32(padapter, REG_MCUFWDL, &value32); + if (res) + return _FAIL; + value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; rtw_write32(padapter, REG_MCUFWDL, value32); @@ -196,9 +222,10 @@ static int fw_free_to_go(struct adapter *padapter) /* polling for FW ready */ counter = 0; do { - value32 = rtw_read32(padapter, REG_MCUFWDL); - if (value32 & WINTINI_RDY) + res = rtw_read32(padapter, REG_MCUFWDL, &value32); + if (!res && value32 & WINTINI_RDY) return _SUCCESS; + udelay(5); } while (counter++ < POLLING_READY_TIMEOUT_COUNT); @@ -239,7 +266,7 @@ exit: int rtl8188e_firmware_download(struct adapter *padapter) { int ret = _SUCCESS; - u8 write_fw_retry = 0; + u8 reg; unsigned long fwdl_timeout; struct dvobj_priv *dvobj = adapter_to_dvobj(padapter); struct device *device = dvobj_to_dev(dvobj); @@ -259,9 +286,9 @@ int rtl8188e_firmware_download(struct adapter *padapter) fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data; if (IS_FW_HEADER_EXIST(fwhdr)) { - pr_info_once("R8188EU: Firmware Version %d, SubVersion %d, Signature 0x%x\n", - le16_to_cpu(fwhdr->version), fwhdr->subversion, - le16_to_cpu(fwhdr->signature)); + dev_info_once(device, "Firmware Version %d, SubVersion %d, Signature 0x%x\n", + le16_to_cpu(fwhdr->version), fwhdr->subversion, + le16_to_cpu(fwhdr->signature)); fw_data = fw_data + sizeof(struct rt_firmware_hdr); fw_size = fw_size - sizeof(struct rt_firmware_hdr); @@ -269,23 +296,34 @@ int rtl8188e_firmware_download(struct adapter *padapter) /* Suggested by Filen. If 8051 is running in RAM code, driver should inform Fw to reset by itself, */ /* or it will cause download Fw fail. 2010.02.01. by tynli. 
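The firmware-download rework above drops the write_fw_retry counter in favour of a jiffies deadline. The generic form of that bounded-retry idiom is sketched below; try_once() and TIMEOUT_MS are placeholders rather than anything in the driver:

static int retry_until_deadline(struct adapter *padapter)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(TIMEOUT_MS);
	int ret;

	do {
		ret = try_once(padapter);
		if (ret == _SUCCESS)
			break;
	} while (!time_after(jiffies, deadline));

	return ret;
}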
*/ - if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) { /* 8051 RAM code */ + ret = rtw_read8(padapter, REG_MCUFWDL, ®); + if (ret) { + ret = _FAIL; + goto exit; + } + + if (reg & RAM_DL_SEL) { /* 8051 RAM code */ rtw_write8(padapter, REG_MCUFWDL, 0x00); rtw_reset_8051(padapter); } fw_download_enable(padapter, true); fwdl_timeout = jiffies + msecs_to_jiffies(500); - while (1) { + do { /* reset the FWDL chksum */ - rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_CHKSUM_RPT); + ret = rtw_read8(padapter, REG_MCUFWDL, ®); + if (ret) { + ret = _FAIL; + continue; + } - ret = write_fw(padapter, fw_data, fw_size); + rtw_write8(padapter, REG_MCUFWDL, reg | FWDL_CHKSUM_RPT); - if (ret == _SUCCESS || - (time_after(jiffies, fwdl_timeout) && write_fw_retry++ >= 3)) + ret = write_fw(padapter, fw_data, fw_size); + if (ret == _SUCCESS) break; - } + } while (!time_after(jiffies, fwdl_timeout)); + fw_download_enable(padapter, false); if (ret != _SUCCESS) goto exit; diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c index 385a9ed8eff7..bc8543ea2e66 100644 --- a/drivers/staging/r8188eu/core/rtw_ieee80211.c +++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c @@ -1048,6 +1048,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork) unsigned char *pbuf; int group_cipher = 0, pairwise_cipher = 0, is8021x = 0; int ret = _FAIL; + pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12); if (pbuf && (wpa_ielen > 0)) { diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c index 7ba75f73e47e..17f6bcbeebf4 100644 --- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c +++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c @@ -71,7 +71,6 @@ u8 rtw_do_join(struct adapter *padapter) pibss = padapter->registrypriv.dev_network.MacAddress; - memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(padapter); diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c index af8e84a41b85..31e196ccd899 100644 --- a/drivers/staging/r8188eu/core/rtw_iol.c +++ b/drivers/staging/r8188eu/core/rtw_iol.c @@ -67,7 +67,7 @@ bool rtw_IOL_applied(struct adapter *adapter) return false; } -int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask) +int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask) { struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0}; @@ -81,7 +81,7 @@ int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length); } -int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask) +int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask) { struct ioreg_cfg cmd = {8, IOREG_CMD_WW_REG, 0x0, 0x0, 0x0}; @@ -95,7 +95,7 @@ int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length); } -int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask) +int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask) { struct ioreg_cfg cmd = {8, IOREG_CMD_WD_REG, 0x0, 0x0, 0x0}; @@ -109,7 +109,7 @@ int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 
value, u return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length); } -int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask) +int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask) { struct ioreg_cfg cmd = {8, IOREG_CMD_W_RF, 0x0, 0x0, 0x0}; diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c index 2f3000428af7..d5c6c5e29621 100644 --- a/drivers/staging/r8188eu/core/rtw_led.c +++ b/drivers/staging/r8188eu/core/rtw_led.c @@ -16,7 +16,7 @@ (l)->CurrLedState == LED_BLINK_WPS_STOP || \ (l)->bLedWPSBlinkInProgress) -static void ResetLedStatus(struct LED_871x *pLed) +static void ResetLedStatus(struct led_priv *pLed) { pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */ pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */ @@ -32,30 +32,40 @@ static void ResetLedStatus(struct LED_871x *pLed) pLed->bLedScanBlinkInProgress = false; } -static void SwLedOn(struct adapter *padapter, struct LED_871x *pLed) +static void SwLedOn(struct adapter *padapter, struct led_priv *pLed) { u8 LedCfg; + int res; if (padapter->bSurpriseRemoved || padapter->bDriverStopped) return; - LedCfg = rtw_read8(padapter, REG_LEDCFG2); + res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg); + if (res) + return; + rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */ pLed->bLedOn = true; } -static void SwLedOff(struct adapter *padapter, struct LED_871x *pLed) +static void SwLedOff(struct adapter *padapter, struct led_priv *pLed) { u8 LedCfg; + int res; if (padapter->bSurpriseRemoved || padapter->bDriverStopped) goto exit; - LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */ + res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */ + if (res) + goto exit; LedCfg &= 0x90; /* Set to software control. 
*/ rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3))); - LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG); + res = rtw_read8(padapter, REG_MAC_PINMUX_CFG, &LedCfg); + if (res) + goto exit; + LedCfg &= 0xFE; rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg); exit: @@ -65,7 +75,7 @@ exit: static void blink_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct LED_871x *pLed = container_of(dwork, struct LED_871x, blink_work); + struct led_priv *pLed = container_of(dwork, struct led_priv, blink_work); struct adapter *padapter = pLed->padapter; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -172,35 +182,32 @@ static void blink_work(struct work_struct *work) void rtl8188eu_InitSwLeds(struct adapter *padapter) { struct led_priv *pledpriv = &padapter->ledpriv; - struct LED_871x *pLed = &pledpriv->SwLed0; - pLed->padapter = padapter; - ResetLedStatus(pLed); - INIT_DELAYED_WORK(&pLed->blink_work, blink_work); + pledpriv->padapter = padapter; + ResetLedStatus(pledpriv); + INIT_DELAYED_WORK(&pledpriv->blink_work, blink_work); } void rtl8188eu_DeInitSwLeds(struct adapter *padapter) { struct led_priv *ledpriv = &padapter->ledpriv; - struct LED_871x *pLed = &ledpriv->SwLed0; - cancel_delayed_work_sync(&pLed->blink_work); - ResetLedStatus(pLed); - SwLedOff(padapter, pLed); + cancel_delayed_work_sync(&ledpriv->blink_work); + ResetLedStatus(ledpriv); + SwLedOff(padapter, ledpriv); } void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) { - struct led_priv *ledpriv = &padapter->ledpriv; + struct led_priv *pLed = &padapter->ledpriv; struct registry_priv *registry_par; - struct LED_871x *pLed = &ledpriv->SwLed0; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) || (!padapter->hw_init_completed)) return; - if (!ledpriv->bRegUseLed) + if (!pLed->bRegUseLed) return; registry_par = &padapter->registrypriv; diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c index 5a815642c3f6..2705c9d87b14 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme.c +++ b/drivers/staging/r8188eu/core/rtw_mlme.c @@ -676,7 +676,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf) _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); - memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(adapter); @@ -1118,7 +1117,7 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, /* MACID|OPMODE:1 connect */ media_status_rpt = (u16)((psta->mac_id << 8) | mstatus); - SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt); + rtl8188e_set_FwMediaStatus_cmd(adapter, media_status_rpt); } void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf) @@ -1196,7 +1195,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf) u16 media_status; media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */ /* for STA, AP, ADHOC mode, report disconnect stauts to FW */ - SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); + rtl8188e_set_FwMediaStatus_cmd(adapter, media_status); } if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) @@ -1253,7 +1252,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf) memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network)); - memset(&pdev_network->Ssid, 0, sizeof(struct 
ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(adapter); diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c index faf23fc950c5..32d0e101d0c2 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c @@ -428,6 +428,58 @@ static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da) return _SUCCESS; } +static void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe) +{ + u8 *pIE; + __le32 *pbuf; + + pIE = pframe + sizeof(struct ieee80211_hdr_3addr); + pbuf = (__le32 *)pIE; + + pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1)); + + pmlmeext->TSFValue = pmlmeext->TSFValue << 32; + + pmlmeext->TSFValue |= le32_to_cpu(*pbuf); +} + +static void correct_TSF(struct adapter *padapter) +{ + u8 reg; + int res; + u64 tsf; + struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; + struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; + + tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue, + pmlmeinfo->bcn_interval * 1024) - 1024; /* us */ + + if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || + ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) + rtw_stop_tx_beacon(padapter); + + /* disable related TSF function */ + res = rtw_read8(padapter, REG_BCN_CTRL, ®); + if (res) + return; + + rtw_write8(padapter, REG_BCN_CTRL, reg & (~BIT(3))); + + rtw_write32(padapter, REG_TSFTR, tsf); + rtw_write32(padapter, REG_TSFTR + 4, tsf >> 32); + + /* enable related TSF function */ + res = rtw_read8(padapter, REG_BCN_CTRL, ®); + if (res) + return; + + rtw_write8(padapter, REG_BCN_CTRL, reg | BIT(3)); + + if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || + ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) + rtw_resume_tx_beacon(padapter); +} + /**************************************************************************** Following are the callback functions for each subtype of the management frames @@ -582,7 +634,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame) pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr)); /* update TSF Value */ - update_TSF(pmlmeext, pframe, len); + update_TSF(pmlmeext, pframe); /* start auth */ start_clnt_auth(padapter); @@ -625,7 +677,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame) } /* update TSF Value */ - update_TSF(pmlmeext, pframe, len); + update_TSF(pmlmeext, pframe); /* report sta add event */ report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx); @@ -5363,26 +5415,20 @@ exit: return ret; } -void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status) +void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status) { - u8 category = WLAN_CATEGORY_BACK; u16 start_seq; - u16 BA_para_set; - u16 reason_code; - u16 BA_timeout_value; - __le16 le_tmp; u16 BA_starting_seqctrl = 0; struct xmit_frame *pmgntframe; struct pkt_attrib *pattrib; - u8 *pframe; - struct ieee80211_hdr *pwlanhdr; - __le16 *fctrl; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; struct registry_priv *pregpriv = &padapter->registrypriv; + struct ieee80211_mgmt *mgmt; + u16 
capab, params; pmgntframe = alloc_mgtxmitframe(pxmitpriv); if (!pmgntframe) @@ -5394,81 +5440,70 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET); - pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET; - pwlanhdr = (struct ieee80211_hdr *)pframe; + mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET); - fctrl = &pwlanhdr->frame_control; - *(fctrl) = 0; + mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION | IEEE80211_FTYPE_MGMT); - /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */ - memcpy(pwlanhdr->addr1, raddr, ETH_ALEN); - memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN); - memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN); + memcpy(mgmt->da, raddr, ETH_ALEN); + memcpy(mgmt->sa, myid(&padapter->eeprompriv), ETH_ALEN); + memcpy(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN); - SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq); + mgmt->seq_ctrl = cpu_to_le16(pmlmeext->mgnt_seq); pmlmeext->mgnt_seq++; - SetFrameSubType(pframe, WIFI_ACTION); - - pframe += sizeof(struct ieee80211_hdr_3addr); - pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr); - pframe = rtw_set_fixed_ie(pframe, 1, &(category), &pattrib->pktlen); - pframe = rtw_set_fixed_ie(pframe, 1, &(action), &pattrib->pktlen); + mgmt->u.action.category = WLAN_CATEGORY_BACK; - if (category == 3) { - switch (action) { - case 0: /* ADDBA req */ - do { - pmlmeinfo->dialogToken++; - } while (pmlmeinfo->dialogToken == 0); - pframe = rtw_set_fixed_ie(pframe, 1, &pmlmeinfo->dialogToken, &pattrib->pktlen); + switch (action) { + case WLAN_ACTION_ADDBA_REQ: + mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; + do { + pmlmeinfo->dialogToken++; + } while (pmlmeinfo->dialogToken == 0); + mgmt->u.action.u.addba_req.dialog_token = pmlmeinfo->dialogToken; - BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */ - le_tmp = cpu_to_le16(BA_para_set); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); + /* immediate ack & 64 buffer size */ + capab = u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + capab |= u16_encode_bits(1, IEEE80211_ADDBA_PARAM_POLICY_MASK); + capab |= u16_encode_bits(status, IEEE80211_ADDBA_PARAM_TID_MASK); + mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); - BA_timeout_value = 5000;/* 5ms */ - le_tmp = cpu_to_le16(BA_timeout_value); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); + mgmt->u.action.u.addba_req.timeout = cpu_to_le16(5000); /* 5 ms */ - psta = rtw_get_stainfo(pstapriv, raddr); - if (psta) { - start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1; + psta = rtw_get_stainfo(pstapriv, raddr); + if (psta) { + start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1; - psta->BA_starting_seqctrl[status & 0x07] = start_seq; + psta->BA_starting_seqctrl[status & 0x07] = start_seq; - BA_starting_seqctrl = start_seq << 4; - } - le_tmp = cpu_to_le16(BA_starting_seqctrl); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); - break; - case 1: /* ADDBA rsp */ - pframe = rtw_set_fixed_ie(pframe, 1, &pmlmeinfo->ADDBA_req.dialog_token, &pattrib->pktlen); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&status, &pattrib->pktlen); - BA_para_set = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f; - BA_para_set |= 0x1000; /* 64 buffer size */ - - if 
(pregpriv->ampdu_amsdu == 0)/* disabled */ - BA_para_set = BA_para_set & ~BIT(0); - else if (pregpriv->ampdu_amsdu == 1)/* enabled */ - BA_para_set = BA_para_set | BIT(0); - le_tmp = cpu_to_le16(BA_para_set); - - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&pmlmeinfo->ADDBA_req.BA_timeout_value, &pattrib->pktlen); - break; - case 2:/* DELBA */ - BA_para_set = (status & 0x1F) << 3; - le_tmp = cpu_to_le16(BA_para_set); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); - - reason_code = 37;/* Requested from peer STA as it does not want to use the mechanism */ - le_tmp = cpu_to_le16(reason_code); - pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen); - break; - default: - break; + BA_starting_seqctrl = start_seq << 4; } + mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(BA_starting_seqctrl); + + pattrib->pktlen = offsetofend(struct ieee80211_mgmt, + u.action.u.addba_req.start_seq_num); + break; + case WLAN_ACTION_ADDBA_RESP: + mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; + mgmt->u.action.u.addba_resp.dialog_token = pmlmeinfo->ADDBA_req.dialog_token; + mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); + capab = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f; + capab |= u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + capab |= u16_encode_bits(pregpriv->ampdu_amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK); + mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); + mgmt->u.action.u.addba_resp.timeout = pmlmeinfo->ADDBA_req.BA_timeout_value; + pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.addba_resp.timeout); + break; + case WLAN_ACTION_DELBA: + mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; + mgmt->u.action.u.delba.params = cpu_to_le16((status & 0x1F) << 3); + params = u16_encode_bits((status & 0x1), IEEE80211_DELBA_PARAM_INITIATOR_MASK); + params |= u16_encode_bits((status >> 1) & 0xF, IEEE80211_DELBA_PARAM_TID_MASK); + mgmt->u.action.u.delba.params = cpu_to_le16(params); + mgmt->u.action.u.delba.reason_code = cpu_to_le16(WLAN_STATUS_REQUEST_DECLINED); + pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.delba.reason_code); + break; + default: + break; } pattrib->last_txcmdsz = pattrib->pktlen; @@ -5623,7 +5658,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr) if (initiator == 0) { /* recipient */ for (tid = 0; tid < MAXTID; tid++) { if (psta->recvreorder_ctrl[tid].enable) { - issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); + issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); psta->recvreorder_ctrl[tid].enable = false; psta->recvreorder_ctrl[tid].indicate_seq = 0xffff; } @@ -5631,7 +5666,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr) } else if (initiator == 1) { /* originator */ for (tid = 0; tid < MAXTID; tid++) { if (psta->htpriv.agg_enable_bitmap & BIT(tid)) { - issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); + issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); psta->htpriv.agg_enable_bitmap &= ~BIT(tid); psta->htpriv.candidate_tid_bitmap &= ~BIT(tid); } @@ -5667,14 +5702,129 @@ unsigned int send_beacon(struct adapter *padapter) bool get_beacon_valid_bit(struct adapter *adapter) { + int res; + u8 reg; + + res = rtw_read8(adapter, 
REG_TDECTRL + 2, ®); + if (res) + return false; + /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */ - return BIT(0) & rtw_read8(adapter, REG_TDECTRL + 2); + return BIT(0) & reg; } void clear_beacon_valid_bit(struct adapter *adapter) { + int res; + u8 reg; + + res = rtw_read8(adapter, REG_TDECTRL + 2, ®); + if (res) + return; + /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */ - rtw_write8(adapter, REG_TDECTRL + 2, rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0)); + rtw_write8(adapter, REG_TDECTRL + 2, reg | BIT(0)); +} + +void rtw_resume_tx_beacon(struct adapter *adapt) +{ + struct hal_data_8188e *haldata = &adapt->haldata; + + /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */ + /* which should be read from register to a global variable. */ + + rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6)); + haldata->RegFwHwTxQCtrl |= BIT(6); + rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff); + haldata->RegReg542 |= BIT(0); + rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542); +} + +void rtw_stop_tx_beacon(struct adapter *adapt) +{ + struct hal_data_8188e *haldata = &adapt->haldata; + + /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */ + /* which should be read from register to a global variable. */ + + rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6))); + haldata->RegFwHwTxQCtrl &= (~BIT(6)); + rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64); + haldata->RegReg542 &= ~(BIT(0)); + rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542); + + /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */ +} + +static void rtw_set_opmode(struct adapter *adapter, u8 mode) +{ + u8 val8; + int res; + + /* disable Port0 TSF update */ + res = rtw_read8(adapter, REG_BCN_CTRL, &val8); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL, val8 | BIT(4)); + + /* set net_type */ + res = rtw_read8(adapter, MSR, &val8); + if (res) + return; + + val8 &= 0x0c; + val8 |= mode; + rtw_write8(adapter, MSR, val8); + + if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) { + rtw_stop_tx_beacon(adapter); + + rtw_write8(adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */ + } else if (mode == _HW_STATE_ADHOC_) { + rtw_resume_tx_beacon(adapter); + rtw_write8(adapter, REG_BCN_CTRL, 0x1a); + } else if (mode == _HW_STATE_AP_) { + rtw_resume_tx_beacon(adapter); + + rtw_write8(adapter, REG_BCN_CTRL, 0x12); + + /* Set RCR */ + rtw_write32(adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must set to 0,reject ICV_ERR packet */ + /* enable to rx data frame */ + rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF); + /* enable to rx ps-poll */ + rtw_write16(adapter, REG_RXFLTMAP1, 0x0400); + + /* Beacon Control related register for first time */ + rtw_write8(adapter, REG_BCNDMATIM, 0x02); /* 2ms */ + + rtw_write8(adapter, REG_ATIMWND, 0x0a); /* 10ms */ + rtw_write16(adapter, REG_BCNTCFG, 0x00); + rtw_write16(adapter, REG_TBTT_PROHIBIT, 0xff04); + rtw_write16(adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */ + + /* reset TSF */ + rtw_write8(adapter, REG_DUAL_TSF_RST, BIT(0)); + + /* BIT(3) - If set 0, hw will clr bcnq when tx becon ok/fail or port 0 */ + res = rtw_read8(adapter, REG_MBID_NUM, &val8); + if (res) + return; + + rtw_write8(adapter, REG_MBID_NUM, val8 | BIT(3) | BIT(4)); + + /* enable BCN0 Function for if1 */ + /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */ + 
rtw_write8(adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1))); + + /* dis BCN1 ATIM WND if if2 is station */ + res = rtw_read8(adapter, REG_BCN_CTRL_1, &val8); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL_1, val8 | BIT(0)); + } } /**************************************************************************** @@ -5698,9 +5848,70 @@ static void rtw_set_initial_gain(struct adapter *adapter, u8 gain) } } +void rtw_mlme_under_site_survey(struct adapter *adapter) +{ + /* config RCR to receive different BSSID & not to receive data frame */ + + int res; + u8 reg; + u32 v; + + res = rtw_read32(adapter, REG_RCR, &v); + if (res) + return; + + v &= ~(RCR_CBSSID_BCN); + rtw_write32(adapter, REG_RCR, v); + /* reject all data frame */ + rtw_write16(adapter, REG_RXFLTMAP2, 0x00); + + /* disable update TSF */ + res = rtw_read8(adapter, REG_BCN_CTRL, ®); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4)); +} + +void rtw_mlme_site_survey_done(struct adapter *adapter) +{ + struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; + struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; + u32 reg32; + int res; + u8 reg; + + if ((is_client_associated_to_ap(adapter)) || + ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) { + /* enable to rx data frame */ + rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF); + + /* enable update TSF */ + res = rtw_read8(adapter, REG_BCN_CTRL, ®); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4))); + } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) { + rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF); + /* enable update TSF */ + res = rtw_read8(adapter, REG_BCN_CTRL, ®); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4))); + } + + res = rtw_read32(adapter, REG_RCR, ®32); + if (res) + return; + + rtw_write32(adapter, REG_RCR, reg32 | RCR_CBSSID_BCN); +} + void site_survey(struct adapter *padapter) { - unsigned char survey_channel = 0, val8; + unsigned char survey_channel = 0; enum rt_scan_type ScanType = SCAN_PASSIVE; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; @@ -5824,8 +6035,7 @@ void site_survey(struct adapter *padapter) if (is_client_associated_to_ap(padapter)) issue_nulldata(padapter, NULL, 0, 3, 500); - val8 = 0; /* survey done */ - SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8)); + rtw_mlme_site_survey_done(padapter); report_surveydone_event(padapter); @@ -6002,7 +6212,9 @@ static void rtw_set_bssid(struct adapter *adapter, u8 *bssid) static void mlme_join(struct adapter *adapter, int type) { struct mlme_priv *mlmepriv = &adapter->mlmepriv; - u8 retry_limit = 0x30; + u8 retry_limit = 0x30, reg; + u32 reg32; + int res; switch (type) { case 0: @@ -6010,8 +6222,12 @@ static void mlme_join(struct adapter *adapter, int type) /* enable to rx data frame, accept all data frame */ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF); + res = rtw_read32(adapter, REG_RCR, ®32); + if (res) + return; + rtw_write32(adapter, REG_RCR, - rtw_read32(adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN); + reg32 | RCR_CBSSID_DATA | RCR_CBSSID_BCN); if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) { retry_limit = 48; @@ -6027,7 +6243,11 @@ static void mlme_join(struct adapter *adapter, int type) case 2: /* sta add event call back */ /* enable update TSF */ - rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) & (~BIT(4))); + res = rtw_read8(adapter, REG_BCN_CTRL, ®); + if (res) + return; + + 
rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4))); if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) retry_limit = 0x7; @@ -6184,14 +6404,14 @@ void start_clnt_assoc(struct adapter *padapter) set_link_timer(pmlmeext, REASSOC_TO); } -unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason) +void receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason) { struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; /* check A3 */ if (!(!memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))) - return _SUCCESS; + return; if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) { if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) { @@ -6202,7 +6422,6 @@ unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr report_join_res(padapter, -2); } } - return _SUCCESS; } static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid) @@ -6640,6 +6859,23 @@ void update_sta_info(struct adapter *padapter, struct sta_info *psta) psta->state = _FW_LINKED; } +static void rtw_reset_dm_func_flag(struct adapter *adapter) +{ + struct hal_data_8188e *haldata = &adapter->haldata; + struct dm_priv *dmpriv = &haldata->dmpriv; + struct odm_dm_struct *odmpriv = &haldata->odmpriv; + + odmpriv->SupportAbility = dmpriv->InitODMFlag; +} + +static void rtw_clear_dm_func_flag(struct adapter *adapter) +{ + struct hal_data_8188e *haldata = &adapter->haldata; + struct odm_dm_struct *odmpriv = &haldata->odmpriv; + + odmpriv->SupportAbility = 0; +} + void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res) { struct sta_info *psta, *psta_bmc; @@ -6670,12 +6906,12 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res) } /* turn on dynamic functions */ - SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_RESET, NULL); + rtw_reset_dm_func_flag(padapter); /* update IOT-releated issue */ update_IOT_info(padapter); - SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates); + rtw_set_basic_rate(padapter, cur_network->SupportedRates); /* BCN interval */ rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval); @@ -6702,14 +6938,14 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res) rtw_set_max_rpt_macid(padapter, psta->mac_id); media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */ - SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); + rtl8188e_set_FwMediaStatus_cmd(padapter, media_status); } mlme_join(padapter, 2); if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) { /* correcting TSF */ - correct_TSF(padapter, pmlmeext); + correct_TSF(padapter); } rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0); } @@ -6724,7 +6960,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p /* nothing to do */ } else { /* adhoc client */ /* correcting TSF */ - correct_TSF(padapter, pmlmeext); + correct_TSF(padapter); /* start beacon */ if (send_beacon(padapter) == _FAIL) { @@ -6748,6 +6984,9 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p static void mlme_disconnect(struct adapter *adapter) { + int res; + u8 reg; + /* Set RCR to not to receive data frame when NO LINK state */ /* reject all data frames */ rtw_write16(adapter, REG_RXFLTMAP2, 0x00); @@ -6756,7 +6995,12 @@ static void mlme_disconnect(struct adapter *adapter) rtw_write8(adapter, 
REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); /* disable update TSF */ - rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) | BIT(4)); + + res = rtw_read8(adapter, REG_BCN_CTRL, &reg); + if (res) + return; + + rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4)); } void mlmeext_sta_del_event_callback(struct adapter *padapter) @@ -6810,14 +7054,20 @@ static u8 chk_ap_is_alive(struct sta_info *psta) return ret; } -static void rtl8188e_sreset_linked_status_check(struct adapter *padapter) +static int rtl8188e_sreset_linked_status_check(struct adapter *padapter) { - u32 rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS); + u32 rx_dma_status; + int res; + u8 reg; + + res = rtw_read32(padapter, REG_RXDMA_STATUS, &rx_dma_status); + if (res) + return res; if (rx_dma_status != 0x00) rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status); - rtw_read8(padapter, REG_FMETHR); + return rtw_read8(padapter, REG_FMETHR, &reg); } void linked_status_chk(struct adapter *padapter) @@ -7045,7 +7295,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf) type = _HW_STATE_NOLINK_; } - SetHwReg8188EU(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type)); + rtw_set_opmode(padapter, type); return H2C_SUCCESS; } @@ -7081,7 +7331,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf) /* disable dynamic functions, such as high power, DIG */ Save_DM_Func_Flag(padapter); - SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL); + rtw_clear_dm_func_flag(padapter); /* cancel link timer */ _cancel_timer_ex(&pmlmeext->link_timer); @@ -7089,7 +7339,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf) /* clear CAM */ flush_all_cam_entry(padapter); - memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength)); + memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength)); pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength; if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */ @@ -7146,7 +7396,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf) pmlmeinfo->candidate_tid_bitmap = 0; pmlmeinfo->bwmode_updated = false; - memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength)); + memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength)); pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength; if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */ @@ -7219,6 +7469,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf) struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network); u8 val8; + int res; if (is_client_associated_to_ap(padapter)) issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100); @@ -7231,7 +7482,10 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf) if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) { /* Stop BCN */ - val8 = rtw_read8(padapter, REG_BCN_CTRL); + res = rtw_read8(padapter, REG_BCN_CTRL, &val8); + if (res) + return H2C_DROPPED; + rtw_write8(padapter, REG_BCN_CTRL, val8 & (~(EN_BCN_FUNCTION | EN_TXBCN_RPT))); } @@ -7302,7 +7556,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf) struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf; u8 bdelayscan = false; - u8 val8; u32 i; struct wifidirect_info *pwdinfo = &padapter->wdinfo; @@ -7347,7 +7600,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf) if ((pmlmeext->sitesurvey_res.state ==
SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) { /* disable dynamic functions, such as high power, DIG */ Save_DM_Func_Flag(padapter); - SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL); + rtw_clear_dm_func_flag(padapter); /* config the initial gain under scanning, need to write the BB registers */ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) @@ -7359,8 +7612,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf) /* set MSR to no link state */ Set_MSR(padapter, _HW_STATE_NOLINK_); - val8 = 1; /* under site survey */ - SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8)); + rtw_mlme_under_site_survey(padapter); pmlmeext->sitesurvey_res.state = SCAN_PROCESS; } @@ -7475,7 +7727,7 @@ u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf) if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) { - issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid); + issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid); _set_timer(&psta->addba_retry_timer, ADDBA_TO); } else { psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid); diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c index beffe5b16f1e..bd654d4ff8b4 100644 --- a/drivers/staging/r8188eu/core/rtw_p2p.c +++ b/drivers/staging/r8188eu/core/rtw_p2p.c @@ -1450,10 +1450,9 @@ static void restore_p2p_state_handler(struct adapter *padapter) static void pre_tx_invitereq_handler(struct adapter *padapter) { struct wifidirect_info *pwdinfo = &padapter->wdinfo; - u8 val8 = 1; set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); - SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8)); + rtw_mlme_under_site_survey(padapter); issue_probereq_p2p(padapter, NULL); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); @@ -1462,10 +1461,9 @@ static void pre_tx_invitereq_handler(struct adapter *padapter) static void pre_tx_provdisc_handler(struct adapter *padapter) { struct wifidirect_info *pwdinfo = &padapter->wdinfo; - u8 val8 = 1; set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); - SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8)); + rtw_mlme_under_site_survey(padapter); issue_probereq_p2p(padapter, NULL); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); @@ -1474,10 +1472,9 @@ static void pre_tx_provdisc_handler(struct adapter *padapter) static void pre_tx_negoreq_handler(struct adapter *padapter) { struct wifidirect_info *pwdinfo = &padapter->wdinfo; - u8 val8 = 1; set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); - SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8)); + rtw_mlme_under_site_survey(padapter); issue_probereq_p2p(padapter, NULL); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); @@ -1891,7 +1888,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role) if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) { /* leave IPS/Autosuspend */ - if (rtw_pwr_wakeup(padapter) == _FAIL) { + if (rtw_pwr_wakeup(padapter)) { ret = _FAIL; goto exit; } @@ -1905,7 +1902,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role) init_wifidirect_info(padapter, role); } else if (role == P2P_ROLE_DISABLE) { - if 
(rtw_pwr_wakeup(padapter) == _FAIL) { + if (rtw_pwr_wakeup(padapter)) { ret = _FAIL; goto exit; } diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c index 7b816b824947..10550bd2c16d 100644 --- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c +++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c @@ -229,6 +229,9 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a static bool lps_rf_on(struct adapter *adapter) { + int res; + u32 reg; + /* When we halt NIC, we should check if FW LPS is leave. */ if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) { /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */ @@ -236,7 +239,11 @@ static bool lps_rf_on(struct adapter *adapter) return true; } - if (rtw_read32(adapter, REG_RCR) & 0x00070000) + res = rtw_read32(adapter, REG_RCR, &reg); + if (res) + return false; + + if (reg & 0x00070000) return false; return true; @@ -266,7 +273,7 @@ static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms) err = -1; break; } - rtw_usleep_os(100); + msleep(1); } return err; @@ -374,24 +381,24 @@ int rtw_pwr_wakeup(struct adapter *padapter) struct mlme_priv *pmlmepriv = &padapter->mlmepriv; unsigned long timeout = jiffies + msecs_to_jiffies(3000); unsigned long deny_time; - int ret = _SUCCESS; + int ret; while (pwrpriv->ps_processing && time_before(jiffies, timeout)) msleep(10); /* I think this should be check in IPS, LPS, autosuspend functions... */ - if (check_fwstate(pmlmepriv, _FW_LINKED)) { - ret = _SUCCESS; + /* Below goto is a success path taken for already linked devices */ + ret = 0; + if (check_fwstate(pmlmepriv, _FW_LINKED)) goto exit; - } if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) { - ret = _FAIL; + ret = -ENOMEM; goto exit; } if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) { - ret = _FAIL; + ret = -EBUSY; goto exit; } @@ -432,7 +439,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode) return 0; } else if (mode == IPS_NONE) { rtw_ips_mode_req(pwrctrlpriv, mode); - if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL)) + if ((padapter->bSurpriseRemoved == 0) && rtw_pwr_wakeup(padapter)) return -EFAULT; } else { return -EINVAL; diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c index df518439aea2..e5a7b7dfc387 100644 --- a/drivers/staging/r8188eu/core/rtw_recv.c +++ b/drivers/staging/r8188eu/core/rtw_recv.c @@ -17,14 +17,14 @@ static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3}; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static u8 rtw_bridge_tunnel_header[] = { - 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 + 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; static u8 rtw_rfc1042_header[] = { - 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 + 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; -void rtw_signal_stat_timer_hdl(struct timer_list *); +static void rtw_signal_stat_timer_hdl(struct timer_list *t); void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv) { @@ -62,7 +62,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter) goto exit; } - precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ); + precvpriv->precv_frame_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ); precvframe = (struct recv_frame *)precvpriv->precv_frame_buf; @@ -166,10 +166,8 @@ int rtw_free_recvframe(struct recv_frame *precvframe, struct __queue
*pfree_recv list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue)); - if (padapter) { - if (pfree_recv_queue == &precvpriv->free_recv_queue) - precvpriv->free_recvframe_cnt++; - } + if (padapter && (pfree_recv_queue == &precvpriv->free_recv_queue)) + precvpriv->free_recvframe_cnt++; spin_unlock_bh(&pfree_recv_queue->lock); @@ -204,12 +202,12 @@ int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue) } /* -caller : defrag ; recvframe_chk_defrag in recv_thread (passive) -pframequeue: defrag_queue : will be accessed in recv_thread (passive) - -using spinlock to protect - -*/ + * caller : defrag ; recvframe_chk_defrag in recv_thread (passive) + * pframequeue: defrag_queue : will be accessed in recv_thread (passive) + * + * using spinlock to protect + * + */ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue) { @@ -237,6 +235,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter) { u32 cnt = 0; struct recv_frame *pending_frame; + while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) { rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue); cnt++; @@ -327,6 +326,7 @@ static struct recv_frame *decryptor(struct adapter *padapter, struct recv_frame if (prxattrib->encrypt > 0) { u8 *iv = precv_frame->rx_data + prxattrib->hdrlen; + prxattrib->key_index = (((iv[3]) >> 6) & 0x3); if (prxattrib->key_index > WEP_KEYS) { @@ -452,8 +452,7 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry, struct stainf return _SUCCESS; } -void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame); -void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame) +static void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame) { unsigned char pwrbit; u8 *ptr = precv_frame->rx_data; @@ -557,15 +556,9 @@ static void count_rx_stats(struct adapter *padapter, struct recv_frame *prframe, } } -int sta2sta_data_frame( - struct adapter *adapter, - struct recv_frame *precv_frame, - struct sta_info **psta -); - -int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame, struct sta_info **psta) +static int sta2sta_data_frame(struct adapter *adapter, + struct recv_frame *precv_frame, struct sta_info **psta) { - u8 *ptr = precv_frame->rx_data; int ret = _SUCCESS; struct rx_pkt_attrib *pattrib = &precv_frame->attrib; struct sta_priv *pstapriv = &adapter->stapriv; @@ -620,12 +613,6 @@ int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame, sta_addr = pattrib->src; } } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) { - memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); - memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); - memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); - memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); - memcpy(pattrib->ta, pattrib->src, ETH_ALEN); - sta_addr = mybssid; } else { ret = _FAIL; @@ -650,6 +637,7 @@ static int ap2sta_data_frame( struct sta_info **psta) { u8 *ptr = precv_frame->rx_data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data; struct rx_pkt_attrib *pattrib = &precv_frame->attrib; int ret = _SUCCESS; struct sta_priv *pstapriv = &adapter->stapriv; @@ -694,24 +682,16 @@ static int ap2sta_data_frame( goto exit; } - /* if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { */ - /* */ - - if (GetFrameSubType(ptr) & BIT(6)) { - /* No data, will not indicate to upper layer, temporily count it here */ + 
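The rtw_pwr_wakeup() hunks earlier in this diff move that function from the driver's _SUCCESS/_FAIL convention to the kernel's 0-or-negative-errno convention, so callers now just test the return value. An illustrative caller under that assumption (the function name is made up):

static int example_wake_then_issue_cmd(struct adapter *padapter)
{
	int err;

	err = rtw_pwr_wakeup(padapter);	/* 0 on success, -EBUSY/-ENOMEM/... on failure */
	if (err)
		return err;

	/* device has left IPS/LPS here; safe to touch the hardware */
	return 0;
}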
if (ieee80211_is_nullfunc(hdr->frame_control)) { + /* We count the nullfunc frame, but we'll not pass it on to higher layers. */ count_rx_stats(adapter, precv_frame, *psta); ret = RTW_RX_HANDLED; goto exit; } } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_LINKED)) { - memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); - memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); - memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); - memcpy(pattrib->ta, pattrib->src, ETH_ALEN); - /* */ memcpy(pattrib->bssid, mybssid, ETH_ALEN); *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */ @@ -778,6 +758,7 @@ static int sta2ap_data_frame(struct adapter *adapter, } } else { u8 *myhwaddr = myid(&adapter->eeprompriv); + if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) { ret = RTW_RX_HANDLED; goto exit; @@ -1023,6 +1004,7 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) { int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter)); + if (ch_set_idx >= 0) pmlmeext->channel_set[ch_set_idx].rx_count++; } @@ -1050,6 +1032,7 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv retval = validate_recv_data_frame(adapter, precv_frame); if (retval == _FAIL) { struct recv_priv *precvpriv = &adapter->recvpriv; + precvpriv->rx_drop++; } } @@ -1313,9 +1296,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) struct rx_pkt_attrib *pattrib; unsigned char *data_ptr; struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT]; + struct recv_priv *precvpriv = &padapter->recvpriv; struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue; int ret = _SUCCESS; + nr_subframes = 0; pattrib = &prframe->attrib; @@ -1366,13 +1351,12 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) a_len -= nSubframe_Length; if (a_len != 0) { padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4 - 1)); - if (padding_len == 4) { + if (padding_len == 4) padding_len = 0; - } - if (a_len < padding_len) { + if (a_len < padding_len) goto exit; - } + pdata += padding_len; a_len -= padding_len; } @@ -1747,9 +1731,11 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) !psecuritypriv->busetkipkey) { rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue); if (recvpriv->free_recvframe_cnt < NR_RECVFRAME / 4) { - /* to prevent from recvframe starvation, + /* + * to prevent from recvframe starvation, * get recvframe from uc_swdec_pending_queue to - * free_recvframe_cnt */ + * free_recvframe_cnt + */ rframe = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue); if (rframe) goto do_posthandle; @@ -1787,7 +1773,7 @@ _recv_entry_drop: return ret; } -void rtw_signal_stat_timer_hdl(struct timer_list *t) +static void rtw_signal_stat_timer_hdl(struct timer_list *t) { struct adapter *adapter = from_timer(adapter, t, recvpriv.signal_stat_timer); struct recv_priv *recvpriv = &adapter->recvpriv; diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c index 392a65783f32..3a002cb6834f 100644 --- a/drivers/staging/r8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c @@ -264,23 +264,30 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen) void Save_DM_Func_Flag(struct adapter *padapter) { - u8 saveflag = true; + struct hal_data_8188e *haldata = 
&padapter->haldata; + struct odm_dm_struct *odmpriv = &haldata->odmpriv; - SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag)); + odmpriv->BK_SupportAbility = odmpriv->SupportAbility; } void Restore_DM_Func_Flag(struct adapter *padapter) { - u8 saveflag = false; + struct hal_data_8188e *haldata = &padapter->haldata; + struct odm_dm_struct *odmpriv = &haldata->odmpriv; - SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag)); + odmpriv->SupportAbility = odmpriv->BK_SupportAbility; } void Set_MSR(struct adapter *padapter, u8 type) { u8 val8; + int res; + + res = rtw_read8(padapter, MSR, &val8); + if (res) + return; - val8 = rtw_read8(padapter, MSR) & 0x0c; + val8 &= 0x0c; val8 |= type; rtw_write8(padapter, MSR, val8); } @@ -505,7 +512,11 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE) static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask) { - u8 acmctrl = rtw_read8(adapter, REG_ACMHWCTRL); + u8 acmctrl; + int res = rtw_read8(adapter, REG_ACMHWCTRL, &acmctrl); + + if (res) + return; if (acm_mask > 1) acmctrl = acmctrl | 0x1; @@ -765,6 +776,7 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE) static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing) { u8 sec_spacing; + int res; if (spacing <= 7) { switch (adapter->securitypriv.dot11PrivacyAlgrthm) { @@ -786,8 +798,38 @@ static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing) if (spacing < sec_spacing) spacing = sec_spacing; + res = rtw_read8(adapter, REG_AMPDU_MIN_SPACE, &sec_spacing); + if (res) + return; + rtw_write8(adapter, REG_AMPDU_MIN_SPACE, - (rtw_read8(adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | spacing); + (sec_spacing & 0xf8) | spacing); + } +} + +static void set_ampdu_factor(struct adapter *adapter, u8 factor) +{ + u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9}; + u8 FactorToSet; + u8 *pRegToSet; + u8 index = 0; + + pRegToSet = RegToSet_Normal; /* 0xb972a841; */ + FactorToSet = factor; + if (FactorToSet <= 3) { + FactorToSet = (1 << (FactorToSet + 2)); + if (FactorToSet > 0xf) + FactorToSet = 0xf; + + for (index = 0; index < 4; index++) { + if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4)) + pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4); + + if ((pRegToSet[index] & 0x0f) > FactorToSet) + pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet); + + rtw_write8(adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]); + } } } @@ -817,7 +859,7 @@ void HTOnAssocRsp(struct adapter *padapter) set_min_ampdu_spacing(padapter, min_MPDU_spacing); - SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len)); + set_ampdu_factor(padapter, max_AMPDU_len); } void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE) @@ -1225,6 +1267,45 @@ void set_sta_rate(struct adapter *padapter, struct sta_info *psta) enable_rate_adaptive(padapter, psta->mac_id); } +void rtw_set_basic_rate(struct adapter *adapter, u8 *rates) +{ + u16 BrateCfg = 0; + u8 RateIndex = 0; + int res; + u8 reg; + + /* 2007.01.16, by Emily */ + /* Select RRSR (in Legacy-OFDM and CCK) */ + /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */ + /* We do not use other rates. 
*/ + HalSetBrateCfg(adapter, rates, &BrateCfg); + + /* 2011.03.30 add by Luke Lee */ + /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */ + /* because CCK 2M has poor TXEVM */ + /* CCK 5.5M & 11M ACK should be enabled for better performance */ + + BrateCfg = (BrateCfg | 0xd) & 0x15d; + + BrateCfg |= 0x01; /* default enable 1M ACK rate */ + /* Set RRSR rate table. */ + rtw_write8(adapter, REG_RRSR, BrateCfg & 0xff); + rtw_write8(adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff); + res = rtw_read8(adapter, REG_RRSR + 2, &reg); + if (res) + return; + + rtw_write8(adapter, REG_RRSR + 2, reg & 0xf0); + + /* Set RTS initial rate */ + while (BrateCfg > 0x1) { + BrateCfg = (BrateCfg >> 1); + RateIndex++; + } + /* Ziv - Check */ + rtw_write8(adapter, REG_INIRTS_RATE_SEL, RateIndex); +} + /* Update RRSR and Rate for USERATE */ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode) { @@ -1250,7 +1331,7 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode) else update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB); - SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, supported_rates); + rtw_set_basic_rate(padapter, supported_rates); } unsigned char check_assoc_AP(u8 *pframe, uint len) @@ -1348,6 +1429,30 @@ static void set_ack_preamble(struct adapter *adapter, bool short_preamble) rtw_write8(adapter, REG_RRSR + 2, val8); }; +static void set_slot_time(struct adapter *adapter, u8 slot_time) +{ + u8 u1bAIFS, aSifsTime; + struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; + struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; + + rtw_write8(adapter, REG_SLOT, slot_time); + + if (pmlmeinfo->WMM_enable == 0) { + if (pmlmeext->cur_wireless_mode == WIRELESS_11B) + aSifsTime = 10; + else + aSifsTime = 16; + + u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime); + + /* <Roger_EXP> Temporary removed, 2008.06.20. 
*/ + rtw_write8(adapter, REG_EDCA_VO_PARAM, u1bAIFS); + rtw_write8(adapter, REG_EDCA_VI_PARAM, u1bAIFS); + rtw_write8(adapter, REG_EDCA_BE_PARAM, u1bAIFS); + rtw_write8(adapter, REG_EDCA_BK_PARAM, u1bAIFS); + } +} + void update_capinfo(struct adapter *Adapter, u16 updateCap) { struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; @@ -1386,7 +1491,7 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap) } } - SetHwReg8188EU(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime); + set_slot_time(Adapter, pmlmeinfo->slotTime); } void update_wireless_mode(struct adapter *padapter) @@ -1466,26 +1571,6 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l return _SUCCESS; } -void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len) -{ - u8 *pIE; - __le32 *pbuf; - - pIE = pframe + sizeof(struct ieee80211_hdr_3addr); - pbuf = (__le32 *)pIE; - - pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1)); - - pmlmeext->TSFValue = pmlmeext->TSFValue << 32; - - pmlmeext->TSFValue |= le32_to_cpu(*pbuf); -} - -void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext) -{ - SetHwReg8188EU(padapter, HW_VAR_CORRECT_TSF, NULL); -} - void beacon_timing_control(struct adapter *padapter) { SetBeaconRelatedRegisters8188EUsb(padapter); diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c index 7135d89caac1..24401f3ae2a0 100644 --- a/drivers/staging/r8188eu/core/rtw_xmit.c +++ b/drivers/staging/r8188eu/core/rtw_xmit.c @@ -16,16 +16,13 @@ static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; static void _init_txservq(struct tx_servq *ptxservq) { - INIT_LIST_HEAD(&ptxservq->tx_pending); rtw_init_queue(&ptxservq->sta_pending); ptxservq->qcnt = 0; - } void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv) { - memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv)); spin_lock_init(&psta_xmitpriv->lock); _init_txservq(&psta_xmitpriv->be_q); @@ -34,7 +31,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv) _init_txservq(&psta_xmitpriv->vo_q); INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz); INIT_LIST_HEAD(&psta_xmitpriv->apsd); - } s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) @@ -78,7 +74,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) res = _FAIL; goto exit; } - pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4); + pxmitpriv->pxmit_frame_buf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_frame_buf), 4); /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */ /* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */ @@ -115,7 +111,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) goto exit; } - pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4); + pxmitpriv->pxmitbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4); /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */ /* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */ @@ -155,7 +151,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) goto exit; } - pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4); + pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4); pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; @@ -299,6 +295,7 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame * /* 
check HT op mode */ if (pattrib->ht_en) { u8 htopmode = pmlmeinfo->HT_protection; + if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) || (!pmlmeext->cur_bwmode && htopmode == 3)) { pattrib->vcs_mode = RTS_CTS; @@ -445,10 +442,11 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p pattrib->pktlen = pktfile.pkt_len; - if (ETH_P_IP == pattrib->ether_type) { + if (pattrib->ether_type == ETH_P_IP) { /* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */ /* to prevent DHCP protocol fail */ u8 tmp[24]; + _rtw_pktfile_read(&pktfile, &tmp[0], 24); pattrib->dhcp_pkt = 0; if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */ @@ -627,7 +625,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr if (pframe[1] & 2) /* From Ds == 1 */ rtw_secmicappend(&micdata, &pframe[24], 6); else - rtw_secmicappend(&micdata, &pframe[10], 6); + rtw_secmicappend(&micdata, &pframe[10], 6); } else { /* ToDS == 0 */ rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */ if (pframe[1] & 2) /* From Ds == 1 */ @@ -953,12 +951,11 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct mpdu_len -= llc_sz; } - if ((pattrib->icv_len > 0) && (pattrib->bswenc)) { + if ((pattrib->icv_len > 0) && (pattrib->bswenc)) mpdu_len -= pattrib->icv_len; - } if (bmcst) { - /* don't do fragment to broadcat/multicast packets */ + /* don't do fragment to broadcast/multicast packets */ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen); } else { mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len); @@ -1068,7 +1065,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len) } break; } - } void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz) @@ -1315,7 +1311,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram rtw_free_xmitframe(pxmitpriv, pxmitframe); } spin_unlock_bh(&pframequeue->lock); - } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1505,7 +1500,6 @@ void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry) for (i = 0; i < entry; i++, phwxmit++) phwxmit->accnt = 0; - } static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb) @@ -1732,7 +1726,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra bool bmcst = is_multicast_ether_addr(pattrib->ra); if (!check_fwstate(pmlmepriv, WIFI_AP_STATE)) - return ret; + return ret; if (pattrib->psta) psta = pattrib->psta; @@ -1760,8 +1754,8 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra pstapriv->tim_bitmap |= BIT(0);/* */ pstapriv->sta_dz_bitmap |= BIT(0); - - update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after upate bcn */ + /* tx bc/mc packets after update bcn */ + update_beacon(padapter, _TIM_IE_, NULL, false); ret = true; } @@ -1811,7 +1805,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra pstapriv->tim_bitmap |= BIT(psta->aid); if (psta->sleepq_len == 1) { - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ update_beacon(padapter, _TIM_IE_, NULL, false); } } @@ -2080,7 +2074,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) { pstapriv->tim_bitmap &= ~BIT(psta->aid); - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ 
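The buffer setup in the rtw_recv.c and rtw_xmit.c hunks above replaces the driver-private N_BYTE_ALIGMENT() macro with the kernel's ALIGN(), which rounds an address up to the next multiple of a power-of-two boundary. A small sketch of the same alignment step, assuming a hypothetical caller that later frees *raw:

#include <linux/kernel.h>
#include <linux/slab.h>

static u8 *example_alloc_aligned(u8 **raw, size_t len, size_t align)
{
	/* over-allocate, then round the start address up to "align" bytes
	 * (align must be a power of two, e.g. RXFRAME_ALIGN_SZ or 4)
	 */
	*raw = kzalloc(len + align, GFP_KERNEL);
	if (!*raw)
		return NULL;

	return (u8 *)ALIGN((size_t)*raw, align);
}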
update_beacon(padapter, _TIM_IE_, NULL, false); } } diff --git a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c deleted file mode 100644 index 6505e1fcb070..000000000000 --- a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2007 - 2011 Realtek Corporation. */ - -#include "../include/Hal8188EPwrSeq.h" -#include "../include/rtl8188e_hal.h" - -struct wl_pwr_cfg rtl8188E_power_on_flow[] = { - { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) }, - { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */ - { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */ - { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/ - { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/ - { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) }, - { 0x0005, PWR_CMD_POLLING, BIT(0), 0 }, - { 0x0023, PWR_CMD_WRITE, BIT(4), 0 }, - { 0xFFFF, PWR_CMD_END, 0, 0 }, -}; - -struct wl_pwr_cfg rtl8188E_card_disable_flow[] = { - { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */ - { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */ - { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */ - { 0x0005, PWR_CMD_POLLING, BIT(1), 0 }, - { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */ - { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */ - { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */ - { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */ - { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */ - { 0xFFFF, PWR_CMD_END, 0, 0 }, -}; - -/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */ -struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = { - { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */ - { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ - { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ - { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ - { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ - { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */ - { 0x0002, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US }, - { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */ - { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */ - { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */ - { 0xFFFF, PWR_CMD_END, 0, 0 }, -}; diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c index 57e8f5573846..1e04de3a6622 100644 --- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c +++ b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c @@ -279,6 +279,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf { /* Wilson 2011/10/26 */ u32 MaskFromReg; s8 i; + int res; switch (pRaInfo->RateID) { case RATR_INX_WIRELESS_NGB: @@ -303,19 +304,31 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0000000d; break; case 12: - MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR0); + res = rtw_read32(dm_odm->Adapter, REG_ARFR0, &MaskFromReg); + if (res) + return res; + pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg; break; case 13: - MaskFromReg = 
rtw_read32(dm_odm->Adapter, REG_ARFR1); + res = rtw_read32(dm_odm->Adapter, REG_ARFR1, &MaskFromReg); + if (res) + return res; + pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg; break; case 14: - MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR2); + res = rtw_read32(dm_odm->Adapter, REG_ARFR2, &MaskFromReg); + if (res) + return res; + pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg; break; case 15: - MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR3); + res = rtw_read32(dm_odm->Adapter, REG_ARFR3, &MaskFromReg); + if (res) + return res; + pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg; break; default: @@ -601,12 +614,12 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 pRAInfo = &dm_odm->RAInfo[MacId]; if (valid) { - pRAInfo->RTY[0] = (u16)GET_TX_REPORT_TYPE1_RERTY_0(pBuffer); - pRAInfo->RTY[1] = (u16)GET_TX_REPORT_TYPE1_RERTY_1(pBuffer); - pRAInfo->RTY[2] = (u16)GET_TX_REPORT_TYPE1_RERTY_2((u8 *)pBuffer); - pRAInfo->RTY[3] = (u16)GET_TX_REPORT_TYPE1_RERTY_3(pBuffer); - pRAInfo->RTY[4] = (u16)GET_TX_REPORT_TYPE1_RERTY_4(pBuffer); - pRAInfo->DROP = (u16)GET_TX_REPORT_TYPE1_DROP_0(pBuffer); + pRAInfo->RTY[0] = le16_to_cpup((__le16 *)pBuffer); + pRAInfo->RTY[1] = pBuffer[2]; + pRAInfo->RTY[2] = pBuffer[3]; + pRAInfo->RTY[3] = pBuffer[4]; + pRAInfo->RTY[4] = pBuffer[5]; + pRAInfo->DROP = pBuffer[6]; pRAInfo->TOTAL = pRAInfo->RTY[0] + pRAInfo->RTY[1] + pRAInfo->RTY[2] + pRAInfo->RTY[3] + pRAInfo->RTY[4] + pRAInfo->DROP; diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c index b944c8071a3b..525deab10820 100644 --- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c +++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c @@ -463,6 +463,7 @@ void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup } } +/* FIXME: return an error to caller */ static void _PHY_SaveMACRegisters( struct adapter *adapt, u32 *MACReg, @@ -470,11 +471,20 @@ static void _PHY_SaveMACRegisters( ) { u32 i; + int res; - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - MACBackup[i] = rtw_read8(adapt, MACReg[i]); + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) { + u8 reg; + + res = rtw_read8(adapt, MACReg[i], &reg); + if (res) + return; - MACBackup[i] = rtw_read32(adapt, MACReg[i]); + MACBackup[i] = reg; + } + + res = rtw_read32(adapt, MACReg[i], MACBackup + i); + (void)res; } static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum) @@ -739,9 +749,12 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt) { u8 tmpreg; u32 RF_Amode = 0, LC_Cal; + int res; /* Check continuous TX and Packet TX */ - tmpreg = rtw_read8(adapt, 0xd03); + res = rtw_read8(adapt, 0xd03, &tmpreg); + if (res) + return; if ((tmpreg & 0x70) != 0) /* Deal with contisuous TX case */ rtw_write8(adapt, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */ diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c index 150ea380c39e..6c0b1368383d 100644 --- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c +++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c @@ -3,25 +3,116 @@ #include "../include/HalPwrSeqCmd.h" -u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[]) +#define PWR_CMD_WRITE 0x01 + /* offset: the read register offset */ + /* msk: the mask of the write bits */ + /* value: write value */ + /* note: driver shall implement this cmd by read & msk after write */ + +#define PWR_CMD_POLLING 0x02 + /* offset: the read register offset */
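The PWR_CMD_WRITE comment above describes a masked read-modify-write on one register; HalPwrSeqCmdParsing() further down applies each such step in exactly this way. A minimal sketch of that semantics, reusing the struct and accessor macros from this hunk (the helper itself is illustrative):

static int example_apply_pwr_write(struct adapter *padapter, struct wl_pwr_cfg cfg)
{
	u8 value;
	int res;

	res = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cfg), &value);
	if (res)
		return res;

	value &= ~GET_PWR_CFG_MASK(cfg);				/* clear the bits this step owns */
	value |= GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg);	/* set the requested bits */
	rtw_write8(padapter, GET_PWR_CFG_OFFSET(cfg), value);

	return 0;
}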
+ /* msk: the mask of the polled value */ + /* value: the value to be polled, masked by the msd field. */ + /* note: driver shall implement this cmd by */ + /* do{ */ + /* if ( (Read(offset) & msk) == (value & msk) ) */ + /* break; */ + /* } while (not timeout); */ + +#define PWR_CMD_DELAY 0x03 + /* offset: the value to delay (in us) */ + /* msk: N/A */ + /* value: N/A */ + +struct wl_pwr_cfg { + u16 offset; + u8 cmd:4; + u8 msk; + u8 value; +}; + +#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset +#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd +#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk +#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value + +static struct wl_pwr_cfg rtl8188E_power_on_flow[] = { + { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) }, + { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */ + { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */ + { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/ + { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/ + { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) }, + { 0x0005, PWR_CMD_POLLING, BIT(0), 0 }, + { 0x0023, PWR_CMD_WRITE, BIT(4), 0 }, +}; + +static struct wl_pwr_cfg rtl8188E_card_disable_flow[] = { + { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */ + { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */ + { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */ + { 0x0005, PWR_CMD_POLLING, BIT(1), 0 }, + { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */ + { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */ + { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */ + { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */ + { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */ +}; + +/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */ +static struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = { + { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */ + { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ + { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ + { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ + { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */ + { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */ + { 0x0002, PWR_CMD_DELAY, 0, 0 }, + { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */ + { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */ + { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */ +}; + +u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq) { struct wl_pwr_cfg pwrcfgcmd = {0}; + struct wl_pwr_cfg *pwrseqcmd; u8 poll_bit = false; - u32 aryidx = 0; + u8 idx, num_steps; u8 value = 0; u32 offset = 0; u32 poll_count = 0; /* polling autoload done. 
*/ u32 max_poll_count = 5000; + int res; + + switch (seq) { + case PWR_ON_FLOW: + pwrseqcmd = rtl8188E_power_on_flow; + num_steps = ARRAY_SIZE(rtl8188E_power_on_flow); + break; + case DISABLE_FLOW: + pwrseqcmd = rtl8188E_card_disable_flow; + num_steps = ARRAY_SIZE(rtl8188E_card_disable_flow); + break; + case LPS_ENTER_FLOW: + pwrseqcmd = rtl8188E_enter_lps_flow; + num_steps = ARRAY_SIZE(rtl8188E_enter_lps_flow); + break; + default: + return false; + } - do { - pwrcfgcmd = pwrseqcmd[aryidx]; + for (idx = 0; idx < num_steps; idx++) { + pwrcfgcmd = pwrseqcmd[idx]; switch (GET_PWR_CFG_CMD(pwrcfgcmd)) { case PWR_CMD_WRITE: offset = GET_PWR_CFG_OFFSET(pwrcfgcmd); /* Read the value from system register */ - value = rtw_read8(padapter, offset); + res = rtw_read8(padapter, offset, &value); + if (res) + return false; value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd)); value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)); @@ -33,7 +124,9 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[]) poll_bit = false; offset = GET_PWR_CFG_OFFSET(pwrcfgcmd); do { - value = rtw_read8(padapter, offset); + res = rtw_read8(padapter, offset, &value); + if (res) + return false; value &= GET_PWR_CFG_MASK(pwrcfgcmd); if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd))) @@ -46,20 +139,11 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[]) } while (!poll_bit); break; case PWR_CMD_DELAY: - if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US) - udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)); - else - udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000); - break; - case PWR_CMD_END: - /* When this command is parsed, end the process */ - return true; + udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)); break; default: break; } - - aryidx++;/* Add Array Index */ - } while (1); + } return true; } diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c index 910cc07f656c..6a1cdc67335b 100644 --- a/drivers/staging/r8188eu/hal/hal_com.c +++ b/drivers/staging/r8188eu/hal/hal_com.c @@ -10,45 +10,6 @@ #define _HAL_INIT_C_ -void dump_chip_info(struct HAL_VERSION chip_vers) -{ - uint cnt = 0; - char buf[128]; - - cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_"); - cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ? - "Normal_Chip" : "Test_Chip"); - cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ? 
- "TSMC" : "UMC"); - - switch (chip_vers.CUTVersion) { - case A_CUT_VERSION: - cnt += sprintf((buf + cnt), "A_CUT_"); - break; - case B_CUT_VERSION: - cnt += sprintf((buf + cnt), "B_CUT_"); - break; - case C_CUT_VERSION: - cnt += sprintf((buf + cnt), "C_CUT_"); - break; - case D_CUT_VERSION: - cnt += sprintf((buf + cnt), "D_CUT_"); - break; - case E_CUT_VERSION: - cnt += sprintf((buf + cnt), "E_CUT_"); - break; - default: - cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion); - break; - } - - cnt += sprintf((buf + cnt), "1T1R_"); - - cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0); - - pr_info("%s", buf); -} - #define CHAN_PLAN_HW 0x80 u8 /* return the final channel plan decision */ @@ -303,7 +264,9 @@ s32 c2h_evt_read(struct adapter *adapter, u8 *buf) if (!buf) goto exit; - trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR); + ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger); + if (ret) + return _FAIL; if (trigger == C2H_EVT_HOST_CLOSE) goto exit; /* Not ready */ @@ -314,13 +277,26 @@ s32 c2h_evt_read(struct adapter *adapter, u8 *buf) memset(c2h_evt, 0, 16); - *buf = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL); - *(buf + 1) = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1); + ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf); + if (ret) { + ret = _FAIL; + goto clear_evt; + } + ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1); + if (ret) { + ret = _FAIL; + goto clear_evt; + } /* Read the content */ - for (i = 0; i < c2h_evt->plen; i++) - c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + - sizeof(*c2h_evt) + i); + for (i = 0; i < c2h_evt->plen; i++) { + ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + + sizeof(*c2h_evt) + i, c2h_evt->payload + i); + if (ret) { + ret = _FAIL; + goto clear_evt; + } + } ret = _SUCCESS; diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c index 475650dc7301..b01ee1695fee 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c @@ -18,13 +18,18 @@ static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num) { - u8 read_down = false; + u8 read_down = false, reg; int retry_cnts = 100; + int res; u8 valid; do { - valid = rtw_read8(adapt, REG_HMETFR) & BIT(msgbox_num); + res = rtw_read8(adapt, REG_HMETFR, &reg); + if (res) + continue; + + valid = reg & BIT(msgbox_num); if (0 == valid) read_down = true; } while ((!read_down) && (retry_cnts--)); @@ -533,6 +538,8 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) bool bcn_valid = false; u8 DLBcnCount = 0; u32 poll = 0; + u8 reg; + int res; if (mstatus == 1) { /* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */ @@ -547,8 +554,17 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* Disable Hw protection for a time which revserd for Hw sending beacon. */ /* Fix download reserved page packet fail that access collision with the protection time. */ /* 2010.05.11. Added by tynli. 
*/ - rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) & (~BIT(3))); - rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) | BIT(4)); + res = rtw_read8(adapt, REG_BCN_CTRL, &reg); + if (res) + return; + + rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(3))); + + res = rtw_read8(adapt, REG_BCN_CTRL, &reg); + if (res) + return; + + rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(4)); if (haldata->RegFwHwTxQCtrl & BIT(6)) bSendBeacon = true; @@ -581,8 +597,17 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* */ /* Enable Bcn */ - rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) | BIT(3)); - rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) & (~BIT(4))); + res = rtw_read8(adapt, REG_BCN_CTRL, &reg); + if (res) + return; + + rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(3)); + + res = rtw_read8(adapt, REG_BCN_CTRL, &reg); + if (res) + return; + + rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(4))); /* To make sure that if there exists an adapter which would like to send beacon. */ /* If exists, the origianl value of 0x422[6] will be 1, we should check this to */ diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c index 6d28e3dc0d26..0399872c4546 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_dm.c @@ -12,8 +12,12 @@ static void dm_InitGPIOSetting(struct adapter *Adapter) { u8 tmp1byte; + int res; + + res = rtw_read8(Adapter, REG_GPIO_MUXCFG, &tmp1byte); + if (res) + return; - tmp1byte = rtw_read8(Adapter, REG_GPIO_MUXCFG); tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT); rtw_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte); diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c index e17375a74f17..5b8f1a912bbb 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c @@ -13,10 +13,14 @@ static void iol_mode_enable(struct adapter *padapter, u8 enable) { u8 reg_0xf0 = 0; + int res; if (enable) { /* Enable initial offload */ - reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG); + res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0); + if (res) + return; + rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN); if (!padapter->bFWReady) @@ -24,7 +28,10 @@ static void iol_mode_enable(struct adapter *padapter, u8 enable) } else { /* disable initial offload */ - reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG); + res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0); + if (res) + return; + rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 & ~SW_OFFLOAD_EN); } } @@ -34,17 +41,31 @@ static s32 iol_execute(struct adapter *padapter, u8 control) s32 status = _FAIL; u8 reg_0x88 = 0; unsigned long timeout; + int res; control = control & 0x0f; - reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0); + res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88); + if (res) + return _FAIL; + rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control); timeout = jiffies + msecs_to_jiffies(1000); - while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control && - time_before(jiffies, timeout)) - ; - reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0); + do { + res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88); + if (res) + continue; + + if (!(reg_0x88 & control)) + break; + + } while (time_before(jiffies, timeout)); + + res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88); + if (res) + return _FAIL; + status = (reg_0x88 & control) ?
_FAIL : _SUCCESS; if (reg_0x88 & control << 4) status = _FAIL; @@ -62,7 +83,7 @@ static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy) } static void -efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) +efuse_phymap_to_logical(u8 *phymap, u16 _size_byte, u8 *pbuf) { u8 *efuseTbl = NULL; u8 rtemp8; @@ -70,7 +91,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) u8 offset, wren; u16 i, j; u16 **eFuseWord = NULL; - u16 efuse_utilized = 0; u8 u1temp = 0; efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL); @@ -92,7 +112,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) /* */ rtemp8 = *(phymap + eFuse_Addr); if (rtemp8 != 0xFF) { - efuse_utilized++; eFuse_Addr++; } else { goto exit; @@ -130,13 +149,11 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) if (!(wren & 0x01)) { rtemp8 = *(phymap + eFuse_Addr); eFuse_Addr++; - efuse_utilized++; eFuseWord[offset][i] = (rtemp8 & 0xff); if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E) break; rtemp8 = *(phymap + eFuse_Addr); eFuse_Addr++; - efuse_utilized++; eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00); if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E) @@ -149,7 +166,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) rtemp8 = *(phymap + eFuse_Addr); if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) { - efuse_utilized++; eFuse_Addr++; } } @@ -167,59 +183,70 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) /* */ /* 4. Copy from Efuse map to output pointer memory!!! */ /* */ - for (i = 0; i < _size_byte; i++) - pbuf[i] = efuseTbl[_offset + i]; - - /* */ - /* 5. Calculate Efuse utilization. */ - /* */ + memcpy(pbuf, efuseTbl, _size_byte); exit: kfree(efuseTbl); kfree(eFuseWord); } -static void efuse_read_phymap_from_txpktbuf( +/* FIXME: add error handling in callers */ +static int efuse_read_phymap_from_txpktbuf( struct adapter *adapter, - int bcnhead, /* beacon head, where FW store len(2-byte) and efuse physical map. */ u8 *content, /* buffer to store efuse physical map */ u16 *size /* for efuse content: the max byte to read. 
will update to byte read */ ) { unsigned long timeout; - u16 dbg_addr = 0; __le32 lo32 = 0, hi32 = 0; u16 len = 0, count = 0; - int i = 0; + int i = 0, res; u16 limit = *size; - + u8 reg; u8 *pos = content; - - if (bcnhead < 0) /* if not valid */ - bcnhead = rtw_read8(adapter, REG_TDECTRL + 1); + u32 reg32; rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT); - dbg_addr = bcnhead * 128 / 8; /* 8-bytes addressing */ while (1) { - rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i); + rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, i); rtw_write8(adapter, REG_TXPKTBUF_DBG, 0); timeout = jiffies + msecs_to_jiffies(1000); - while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) && time_before(jiffies, timeout)) - rtw_usleep_os(100); + do { + res = rtw_read8(adapter, REG_TXPKTBUF_DBG, &reg); + if (res) + continue; + + if (reg) + break; + + msleep(1); + } while (time_before(jiffies, timeout)); /* data from EEPROM needs to be in LE */ - lo32 = cpu_to_le32(rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L)); - hi32 = cpu_to_le32(rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H)); + res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L, &reg32); + if (res) + return res; + + lo32 = cpu_to_le32(reg32); + + res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H, &reg32); + if (res) + return res; + + hi32 = cpu_to_le32(reg32); if (i == 0) { + u16 reg; + /* Although lenc is only used in a debug statement, * do not remove it as the rtw_read16() call consumes * 2 bytes from the EEPROM source. */ - rtw_read16(adapter, REG_PKTBUF_DBG_DATA_L); + res = rtw_read16(adapter, REG_PKTBUF_DBG_DATA_L, &reg); + if (res) + return res; len = le32_to_cpu(lo32) & 0x0000ffff; @@ -246,21 +273,23 @@ static void efuse_read_phymap_from_txpktbuf( } rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS); *size = count; + + return 0; } -static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset, u16 size_byte, u8 *logical_map) +static s32 iol_read_efuse(struct adapter *padapter, u16 size_byte, u8 *logical_map) { s32 status = _FAIL; u8 physical_map[512]; u16 size = 512; - rtw_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy); + rtw_write8(padapter, REG_TDECTRL + 1, 0); memset(physical_map, 0xFF, 512); rtw_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT); status = iol_execute(padapter, CMD_READ_EFUSE_MAP); if (status == _SUCCESS) - efuse_read_phymap_from_txpktbuf(padapter, txpktbuf_bndy, physical_map, &size); - efuse_phymap_to_logical(physical_map, offset, size_byte, logical_map); + efuse_read_phymap_from_txpktbuf(padapter, physical_map, &size); + efuse_phymap_to_logical(physical_map, size_byte, logical_map); return status; } @@ -321,25 +350,35 @@ exit: void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState) { u16 tmpV16; + int res; if (PwrState) { rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid */ - tmpV16 = rtw_read16(pAdapter, REG_SYS_ISO_CTRL); + res = rtw_read16(pAdapter, REG_SYS_ISO_CTRL, &tmpV16); + if (res) + return; + if (!(tmpV16 & PWC_EV12V)) { tmpV16 |= PWC_EV12V; rtw_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16); } /* Reset: 0x0000h[28], default valid */ - tmpV16 = rtw_read16(pAdapter, REG_SYS_FUNC_EN); + res = rtw_read16(pAdapter, REG_SYS_FUNC_EN, &tmpV16); + if (res) + return; + if (!(tmpV16 & FEN_ELDR)) { tmpV16 |= FEN_ELDR; rtw_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16); } /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */ - tmpV16 = rtw_read16(pAdapter, REG_SYS_CLKR); + res = 
rtw_read16(pAdapter, REG_SYS_CLKR, &tmpV16); + if (res) + return; + if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) { tmpV16 |= (LOADER_CLK_EN | ANA8M); rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16); @@ -470,26 +509,60 @@ exit: kfree(eFuseWord); } -static void ReadEFuseByIC(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf) +void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf) { int ret = _FAIL; if (rtw_IOL_applied(Adapter)) { rtl8188eu_InitPowerOn(Adapter); iol_mode_enable(Adapter, 1); - ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf); + ret = iol_read_efuse(Adapter, _size_byte, pbuf); iol_mode_enable(Adapter, 0); if (_SUCCESS == ret) return; } - Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf); + Hal_EfuseReadEFuse88E(Adapter, 0, _size_byte, pbuf); } -void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf) +static void dump_chip_info(struct HAL_VERSION chip_vers) { - ReadEFuseByIC(Adapter, _offset, _size_byte, pbuf); + uint cnt = 0; + char buf[128]; + + cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_"); + cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ? + "Normal_Chip" : "Test_Chip"); + cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ? + "TSMC" : "UMC"); + + switch (chip_vers.CUTVersion) { + case A_CUT_VERSION: + cnt += sprintf((buf + cnt), "A_CUT_"); + break; + case B_CUT_VERSION: + cnt += sprintf((buf + cnt), "B_CUT_"); + break; + case C_CUT_VERSION: + cnt += sprintf((buf + cnt), "C_CUT_"); + break; + case D_CUT_VERSION: + cnt += sprintf((buf + cnt), "D_CUT_"); + break; + case E_CUT_VERSION: + cnt += sprintf((buf + cnt), "E_CUT_"); + break; + default: + cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion); + break; + } + + cnt += sprintf((buf + cnt), "1T1R_"); + + cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0); + + pr_info("%s", buf); } void rtl8188e_read_chip_version(struct adapter *padapter) @@ -497,8 +570,12 @@ void rtl8188e_read_chip_version(struct adapter *padapter) u32 value32; struct HAL_VERSION ChipVersion; struct hal_data_8188e *pHalData = &padapter->haldata; + int res; + + res = rtw_read32(padapter, REG_SYS_CFG, &value32); + if (res) + return; - value32 = rtw_read32(padapter, REG_SYS_CFG); ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP); ChipVersion.VendorType = ((value32 & VENDOR_ID) ? 
CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC); @@ -525,10 +602,17 @@ void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet) void hal_notch_filter_8188e(struct adapter *adapter, bool enable) { + int res; + u8 reg; + + res = rtw_read8(adapter, rOFDM0_RxDSP + 1, ®); + if (res) + return; + if (enable) - rtw_write8(adapter, rOFDM0_RxDSP + 1, rtw_read8(adapter, rOFDM0_RxDSP + 1) | BIT(1)); + rtw_write8(adapter, rOFDM0_RxDSP + 1, reg | BIT(1)); else - rtw_write8(adapter, rOFDM0_RxDSP + 1, rtw_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1)); + rtw_write8(adapter, rOFDM0_RxDSP + 1, reg & ~BIT(1)); } /* */ @@ -538,26 +622,24 @@ void hal_notch_filter_8188e(struct adapter *adapter, bool enable) /* */ static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data) { - s32 status = _SUCCESS; - s32 count = 0; + s32 count; u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); u16 LLTReg = REG_LLT_INIT; + int res; rtw_write32(padapter, LLTReg, value); /* polling */ - do { - value = rtw_read32(padapter, LLTReg); - if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) - break; + for (count = 0; count <= POLLING_LLT_THRESHOLD; count++) { + res = rtw_read32(padapter, LLTReg, &value); + if (res) + continue; - if (count > POLLING_LLT_THRESHOLD) { - status = _FAIL; + if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; - } - } while (count++); + } - return status; + return count > POLLING_LLT_THRESHOLD ? _FAIL : _SUCCESS; } s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy) diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c index 4864dafd887b..dea6d915a1f4 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c @@ -56,8 +56,12 @@ rtl8188e_PHY_QueryBBReg( ) { u32 ReturnValue = 0, OriginalValue, BitShift; + int res; + + res = rtw_read32(Adapter, RegAddr, &OriginalValue); + if (res) + return 0; - OriginalValue = rtw_read32(Adapter, RegAddr); BitShift = phy_CalculateBitShift(BitMask); ReturnValue = (OriginalValue & BitMask) >> BitShift; return ReturnValue; @@ -84,9 +88,13 @@ rtl8188e_PHY_QueryBBReg( void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data) { u32 OriginalValue, BitShift; + int res; if (BitMask != bMaskDWord) { /* if not "double word" write */ - OriginalValue = rtw_read32(Adapter, RegAddr); + res = rtw_read32(Adapter, RegAddr, &OriginalValue); + if (res) + return; + BitShift = phy_CalculateBitShift(BitMask); Data = ((OriginalValue & (~BitMask)) | (Data << BitShift)); } @@ -484,13 +492,17 @@ PHY_BBConfig8188E( { int rtStatus = _SUCCESS; struct hal_data_8188e *pHalData = &Adapter->haldata; - u32 RegVal; + u16 RegVal; u8 CrystalCap; + int res; phy_InitBBRFRegisterDefinition(Adapter); /* Enable BB and RF */ - RegVal = rtw_read16(Adapter, REG_SYS_FUNC_EN); + res = rtw_read16(Adapter, REG_SYS_FUNC_EN, &RegVal); + if (res) + return _FAIL; + rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal | BIT(13) | BIT(0) | BIT(1))); /* 20090923 Joseph: Advised by Steven and Jenyu. Power sequence before init RF. 
*/ @@ -594,6 +606,7 @@ _PHY_SetBWMode92C( struct hal_data_8188e *pHalData = &Adapter->haldata; u8 regBwOpMode; u8 regRRSR_RSC; + int res; if (Adapter->bDriverStopped) return; @@ -602,8 +615,13 @@ _PHY_SetBWMode92C( /* 3<1>Set MAC register */ /* 3 */ - regBwOpMode = rtw_read8(Adapter, REG_BWOPMODE); - regRRSR_RSC = rtw_read8(Adapter, REG_RRSR + 2); + res = rtw_read8(Adapter, REG_BWOPMODE, ®BwOpMode); + if (res) + return; + + res = rtw_read8(Adapter, REG_RRSR + 2, ®RRSR_RSC); + if (res) + return; switch (pHalData->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c index 727e1adce1dc..def6d0d6e402 100644 --- a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c +++ b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c @@ -32,7 +32,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter) goto exit; } - precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_recv_buf), 4); + precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4); precvbuf = (struct recv_buf *)precvpriv->precv_buf; diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c index a217272a07f8..ff074d246dab 100644 --- a/drivers/staging/r8188eu/hal/usb_halinit.c +++ b/drivers/staging/r8188eu/hal/usb_halinit.c @@ -11,7 +11,7 @@ #include "../include/rtw_iol.h" #include "../include/usb_ops.h" #include "../include/usb_osintf.h" -#include "../include/Hal8188EPwrSeq.h" +#include "../include/HalPwrSeqCmd.h" static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe) { @@ -52,12 +52,14 @@ void rtl8188eu_interface_configure(struct adapter *adapt) u32 rtl8188eu_InitPowerOn(struct adapter *adapt) { u16 value16; + int res; + /* HW Power on sequence */ struct hal_data_8188e *haldata = &adapt->haldata; if (haldata->bMacPwrCtrlOn) return _SUCCESS; - if (!HalPwrSeqCmdParsing(adapt, Rtl8188E_NIC_PWR_ON_FLOW)) + if (!HalPwrSeqCmdParsing(adapt, PWR_ON_FLOW)) return _FAIL; /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ @@ -65,7 +67,10 @@ u32 rtl8188eu_InitPowerOn(struct adapter *adapt) rtw_write16(adapt, REG_CR, 0x00); /* suggseted by zhouzhou, by page, 20111230 */ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ - value16 = rtw_read16(adapt, REG_CR); + res = rtw_read16(adapt, REG_CR, &value16); + if (res) + return _FAIL; + value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN | PROTOCOL_EN | SCHEDULE_EN | ENSEC | CALTMR_EN); /* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. 
*/ @@ -81,6 +86,7 @@ static void _InitInterrupt(struct adapter *Adapter) { u32 imr, imr_ex; u8 usb_opt; + int res; /* HISR write one to clear */ rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF); @@ -94,7 +100,9 @@ static void _InitInterrupt(struct adapter *Adapter) /* REG_USB_SPECIAL_OPTION - BIT(4) */ /* 0; Use interrupt endpoint to upload interrupt pkt */ /* 1; Use bulk endpoint to upload interrupt pkt, */ - usb_opt = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION); + res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &usb_opt); + if (res) + return; if (adapter_to_dvobj(Adapter)->pusbdev->speed == USB_SPEED_HIGH) usb_opt = usb_opt | (INT_BULK_SEL); @@ -163,7 +171,14 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ, u16 bkQ, u16 viQ, u16 voQ, u16 mgtQ, u16 hiQ) { - u16 value16 = (rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7); + u16 value16; + int res; + + res = rtw_read16(Adapter, REG_TRXDMA_CTRL, &value16); + if (res) + return; + + value16 &= 0x7; value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) | _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) | @@ -282,8 +297,12 @@ static void _InitQueuePriority(struct adapter *Adapter) static void _InitNetworkType(struct adapter *Adapter) { u32 value32; + int res; + + res = rtw_read32(Adapter, REG_CR, &value32); + if (res) + return; - value32 = rtw_read32(Adapter, REG_CR); /* TODO: use the other function to set network type */ value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP); @@ -323,9 +342,13 @@ static void _InitAdaptiveCtrl(struct adapter *Adapter) { u16 value16; u32 value32; + int res; /* Response Rate Set */ - value32 = rtw_read32(Adapter, REG_RRSR); + res = rtw_read32(Adapter, REG_RRSR, &value32); + if (res) + return; + value32 &= ~RATE_BITMAP_ALL; value32 |= RATE_RRSR_CCK_ONLY_1M; rtw_write32(Adapter, REG_RRSR, value32); @@ -363,8 +386,12 @@ static void _InitEDCA(struct adapter *Adapter) static void _InitRetryFunction(struct adapter *Adapter) { u8 value8; + int res; + + res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL, &value8); + if (res) + return; - value8 = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL); value8 |= EN_AMPDU_RTY_NEW; rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8); @@ -390,11 +417,15 @@ static void _InitRetryFunction(struct adapter *Adapter) static void usb_AggSettingTxUpdate(struct adapter *Adapter) { u32 value32; + int res; if (Adapter->registrypriv.wifi_spec) return; - value32 = rtw_read32(Adapter, REG_TDECTRL); + res = rtw_read32(Adapter, REG_TDECTRL, &value32); + if (res) + return; + value32 = value32 & ~(BLK_DESC_NUM_MASK << BLK_DESC_NUM_SHIFT); value32 |= ((USB_TXAGG_DESC_NUM & BLK_DESC_NUM_MASK) << BLK_DESC_NUM_SHIFT); @@ -423,9 +454,15 @@ usb_AggSettingRxUpdate( { u8 valueDMA; u8 valueUSB; + int res; - valueDMA = rtw_read8(Adapter, REG_TRXDMA_CTRL); - valueUSB = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION); + res = rtw_read8(Adapter, REG_TRXDMA_CTRL, &valueDMA); + if (res) + return; + + res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &valueUSB); + if (res) + return; valueDMA |= RXDMA_AGG_EN; valueUSB &= ~USB_AGG_EN; @@ -446,9 +483,11 @@ static void InitUsbAggregationSetting(struct adapter *Adapter) usb_AggSettingRxUpdate(Adapter); } -static void _InitBeaconParameters(struct adapter *Adapter) +/* FIXME: add error handling in callers */ +static int _InitBeaconParameters(struct adapter *Adapter) { struct hal_data_8188e *haldata = &Adapter->haldata; + int res; rtw_write16(Adapter, REG_BCN_CTRL, 0x1010); @@ -461,9 +500,19 @@ static void _InitBeaconParameters(struct adapter *Adapter) /* beacause test chip does not 
contension before sending beacon. by tynli. 2009.11.03 */ rtw_write16(Adapter, REG_BCNTCFG, 0x660F); - haldata->RegFwHwTxQCtrl = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2); - haldata->RegReg542 = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2); - haldata->RegCR_1 = rtw_read8(Adapter, REG_CR + 1); + res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2, &haldata->RegFwHwTxQCtrl); + if (res) + return res; + + res = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2, &haldata->RegReg542); + if (res) + return res; + + res = rtw_read8(Adapter, REG_CR + 1, &haldata->RegCR_1); + if (res) + return res; + + return 0; } static void _BeaconFunctionEnable(struct adapter *Adapter, @@ -484,11 +533,17 @@ static void _BBTurnOnBlock(struct adapter *Adapter) static void _InitAntenna_Selection(struct adapter *Adapter) { struct hal_data_8188e *haldata = &Adapter->haldata; + int res; + u32 reg; if (haldata->AntDivCfg == 0) return; - rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0) | BIT(23)); + res = rtw_read32(Adapter, REG_LEDCFG0, ®); + if (res) + return; + + rtw_write32(Adapter, REG_LEDCFG0, reg | BIT(23)); rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01); if (rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A) @@ -514,9 +569,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter) u16 value16; u8 txpktbuf_bndy; u32 status = _SUCCESS; + int res; struct hal_data_8188e *haldata = &Adapter->haldata; struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv; struct registry_priv *pregistrypriv = &Adapter->registrypriv; + u32 reg; if (Adapter->pwrctrlpriv.bkeepfwalive) { if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) { @@ -614,13 +671,19 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter) /* Hw bug which Hw initials RxFF boundary size to a value which is larger than the real Rx buffer size in 88E. */ /* */ /* Enable MACTXEN/MACRXEN block */ - value16 = rtw_read16(Adapter, REG_CR); + res = rtw_read16(Adapter, REG_CR, &value16); + if (res) + return _FAIL; + value16 |= (MACTXEN | MACRXEN); rtw_write8(Adapter, REG_CR, value16); /* Enable TX Report */ /* Enable Tx Report Timer */ - value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL); + res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &value8); + if (res) + return _FAIL; + rtw_write8(Adapter, REG_TX_RPT_CTRL, (value8 | BIT(1) | BIT(0))); /* Set MAX RPT MACID */ rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */ @@ -684,7 +747,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter) rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0); /* enable tx DMA to drop the redundate data of packet */ - rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN)); + res = rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK, &value16); + if (res) + return _FAIL; + + rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (value16 | DROP_DATA_EN)); /* 2010/08/26 MH Merge from 8192CE. */ if (pwrctrlpriv->rf_pwrstate == rf_on) { @@ -704,7 +771,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter) rtw_write8(Adapter, REG_USB_HRPWM, 0); /* ack for xmit mgmt frames. */ - rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL) | BIT(12)); + res = rtw_read32(Adapter, REG_FWHW_TXQ_CTRL, ®); + if (res) + return _FAIL; + + rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, reg | BIT(12)); exit: return status; @@ -714,23 +785,33 @@ static void CardDisableRTL8188EU(struct adapter *Adapter) { u8 val8; struct hal_data_8188e *haldata = &Adapter->haldata; + int res; /* Stop Tx Report Timer. 
0x4EC[Bit1]=b'0 */ - val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL); + res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &val8); + if (res) + return; + rtw_write8(Adapter, REG_TX_RPT_CTRL, val8 & (~BIT(1))); /* stop rx */ rtw_write8(Adapter, REG_CR, 0x0); /* Run LPS WL RFOFF flow */ - HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_LPS_ENTER_FLOW); + HalPwrSeqCmdParsing(Adapter, LPS_ENTER_FLOW); /* 2. 0x1F[7:0] = 0 turn off RF */ - val8 = rtw_read8(Adapter, REG_MCUFWDL); + res = rtw_read8(Adapter, REG_MCUFWDL, &val8); + if (res) + return; + if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */ /* Reset MCU 0x2[10]=0. */ - val8 = rtw_read8(Adapter, REG_SYS_FUNC_EN + 1); + res = rtw_read8(Adapter, REG_SYS_FUNC_EN + 1, &val8); + if (res) + return; + val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */ rtw_write8(Adapter, REG_SYS_FUNC_EN + 1, val8); } @@ -740,26 +821,45 @@ static void CardDisableRTL8188EU(struct adapter *Adapter) /* YJ,add,111212 */ /* Disable 32k */ - val8 = rtw_read8(Adapter, REG_32K_CTRL); + res = rtw_read8(Adapter, REG_32K_CTRL, &val8); + if (res) + return; + rtw_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0))); /* Card disable power action flow */ - HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_DISABLE_FLOW); + HalPwrSeqCmdParsing(Adapter, DISABLE_FLOW); /* Reset MCU IO Wrapper */ - val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1); + res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8); + if (res) + return; + rtw_write8(Adapter, REG_RSV_CTRL + 1, (val8 & (~BIT(3)))); - val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1); + + res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8); + if (res) + return; + rtw_write8(Adapter, REG_RSV_CTRL + 1, val8 | BIT(3)); /* YJ,test add, 111207. For Power Consumption. */ - val8 = rtw_read8(Adapter, GPIO_IN); + res = rtw_read8(Adapter, GPIO_IN, &val8); + if (res) + return; + rtw_write8(Adapter, GPIO_OUT, val8); rtw_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */ - val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL); + res = rtw_read8(Adapter, REG_GPIO_IO_SEL, &val8); + if (res) + return; + rtw_write8(Adapter, REG_GPIO_IO_SEL, (val8 << 4)); - val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL + 1); + res = rtw_read8(Adapter, REG_GPIO_IO_SEL + 1, &val8); + if (res) + return; + rtw_write8(Adapter, REG_GPIO_IO_SEL + 1, val8 | 0x0F);/* Reg0x43 */ rtw_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808);/* set LNA ,TRSW,EX_PA Pin to output mode */ haldata->bMacPwrCtrlOn = false; @@ -812,13 +912,10 @@ exit: static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail) { - u16 i; - u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x88, 0x02}; struct eeprom_priv *eeprom = &adapt->eeprompriv; if (AutoLoadFail) { - for (i = 0; i < 6; i++) - eeprom->mac_addr[i] = sMacAddr[i]; + eth_random_addr(eeprom->mac_addr); } else { /* Read Permanent MAC address */ memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN); @@ -829,285 +926,41 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter) { struct eeprom_priv *eeprom = &Adapter->eeprompriv; struct led_priv *ledpriv = &Adapter->ledpriv; + u8 *efuse_buf; u8 eeValue; + int res; /* check system boot selection */ - eeValue = rtw_read8(Adapter, REG_9346CR); - eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM); - eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN); - - if (!is_boot_from_eeprom(Adapter)) - EFUSE_ShadowMapUpdate(Adapter); - - /* parse the eeprom/efuse content */ - Hal_EfuseParseIDCode88E(Adapter, eeprom->efuse_eeprom_data); - Hal_EfuseParseMACAddr_8188EU(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - - 
Hal_ReadPowerSavingMode88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - Hal_ReadTxPowerInfo88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - rtl8188e_EfuseParseChnlPlan(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - Hal_EfuseParseXtal_8188E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - Hal_ReadAntennaDiversity88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - Hal_ReadThermalMeter_88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); - - ledpriv->bRegUseLed = true; -} - -static void ResumeTxBeacon(struct adapter *adapt) -{ - struct hal_data_8188e *haldata = &adapt->haldata; - - /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */ - /* which should be read from register to a global variable. */ - - rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6)); - haldata->RegFwHwTxQCtrl |= BIT(6); - rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff); - haldata->RegReg542 |= BIT(0); - rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542); -} - -static void StopTxBeacon(struct adapter *adapt) -{ - struct hal_data_8188e *haldata = &adapt->haldata; - - /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */ - /* which should be read from register to a global variable. */ - - rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6))); - haldata->RegFwHwTxQCtrl &= (~BIT(6)); - rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64); - haldata->RegReg542 &= ~(BIT(0)); - rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542); - - /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */ -} - -static void hw_var_set_opmode(struct adapter *Adapter, u8 *val) -{ - u8 val8; - u8 mode = *((u8 *)val); - - /* disable Port0 TSF update */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4)); - - /* set net_type */ - val8 = rtw_read8(Adapter, MSR) & 0x0c; - val8 |= mode; - rtw_write8(Adapter, MSR, val8); - - if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) { - StopTxBeacon(Adapter); - - rtw_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */ - } else if (mode == _HW_STATE_ADHOC_) { - ResumeTxBeacon(Adapter); - rtw_write8(Adapter, REG_BCN_CTRL, 0x1a); - } else if (mode == _HW_STATE_AP_) { - ResumeTxBeacon(Adapter); - - rtw_write8(Adapter, REG_BCN_CTRL, 0x12); - - /* Set RCR */ - rtw_write32(Adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must set to 0,reject ICV_ERR packet */ - /* enable to rx data frame */ - rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF); - /* enable to rx ps-poll */ - rtw_write16(Adapter, REG_RXFLTMAP1, 0x0400); - - /* Beacon Control related register for first time */ - rtw_write8(Adapter, REG_BCNDMATIM, 0x02); /* 2ms */ - - rtw_write8(Adapter, REG_ATIMWND, 0x0a); /* 10ms */ - rtw_write16(Adapter, REG_BCNTCFG, 0x00); - rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0xff04); - rtw_write16(Adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */ - - /* reset TSF */ - rtw_write8(Adapter, REG_DUAL_TSF_RST, BIT(0)); + res = rtw_read8(Adapter, REG_9346CR, &eeValue); + if (res) + return; - /* BIT(3) - If set 0, hw will clr bcnq when tx becon ok/fail or port 0 */ - rtw_write8(Adapter, REG_MBID_NUM, rtw_read8(Adapter, REG_MBID_NUM) | BIT(3) | BIT(4)); + eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN); - /* enable BCN0 Function for if1 */ - /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are 
received) */ - rtw_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1))); + efuse_buf = kmalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL); + if (!efuse_buf) + return; + memset(efuse_buf, 0xFF, EFUSE_MAP_LEN_88E); - /* dis BCN1 ATIM WND if if2 is station */ - rtw_write8(Adapter, REG_BCN_CTRL_1, rtw_read8(Adapter, REG_BCN_CTRL_1) | BIT(0)); + if (!(eeValue & BOOT_FROM_EEPROM) && !eeprom->bautoload_fail_flag) { + rtl8188e_EfusePowerSwitch(Adapter, true); + rtl8188e_ReadEFuse(Adapter, EFUSE_MAP_LEN_88E, efuse_buf); + rtl8188e_EfusePowerSwitch(Adapter, false); } -} - -void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val) -{ - struct hal_data_8188e *haldata = &Adapter->haldata; - struct dm_priv *pdmpriv = &haldata->dmpriv; - struct odm_dm_struct *podmpriv = &haldata->odmpriv; - - switch (variable) { - case HW_VAR_SET_OPMODE: - hw_var_set_opmode(Adapter, val); - break; - case HW_VAR_BASIC_RATE: - { - u16 BrateCfg = 0; - u8 RateIndex = 0; - - /* 2007.01.16, by Emily */ - /* Select RRSR (in Legacy-OFDM and CCK) */ - /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */ - /* We do not use other rates. */ - HalSetBrateCfg(Adapter, val, &BrateCfg); - - /* 2011.03.30 add by Luke Lee */ - /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */ - /* because CCK 2M has poor TXEVM */ - /* CCK 5.5M & 11M ACK should be enabled for better performance */ - - BrateCfg = (BrateCfg | 0xd) & 0x15d; - - BrateCfg |= 0x01; /* default enable 1M ACK rate */ - /* Set RRSR rate table. */ - rtw_write8(Adapter, REG_RRSR, BrateCfg & 0xff); - rtw_write8(Adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff); - rtw_write8(Adapter, REG_RRSR + 2, rtw_read8(Adapter, REG_RRSR + 2) & 0xf0); - - /* Set RTS initial rate */ - while (BrateCfg > 0x1) { - BrateCfg = (BrateCfg >> 1); - RateIndex++; - } - /* Ziv - Check */ - rtw_write8(Adapter, REG_INIRTS_RATE_SEL, RateIndex); - } - break; - case HW_VAR_CORRECT_TSF: - { - u64 tsf; - struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; - struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - - tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue, - pmlmeinfo->bcn_interval * 1024) - 1024; /* us */ - if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) - StopTxBeacon(Adapter); - - /* disable related TSF function */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(3))); - - rtw_write32(Adapter, REG_TSFTR, tsf); - rtw_write32(Adapter, REG_TSFTR + 4, tsf >> 32); - - /* enable related TSF function */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(3)); + /* parse the eeprom/efuse content */ + Hal_EfuseParseIDCode88E(Adapter, efuse_buf); + Hal_EfuseParseMACAddr_8188EU(Adapter, efuse_buf, eeprom->bautoload_fail_flag); - if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) - ResumeTxBeacon(Adapter); - } - break; - case HW_VAR_MLME_SITESURVEY: - if (*((u8 *)val)) { /* under sitesurvey */ - /* config RCR to receive different BSSID & not to receive data frame */ - u32 v = rtw_read32(Adapter, REG_RCR); - v &= ~(RCR_CBSSID_BCN); - rtw_write32(Adapter, REG_RCR, v); - /* reject all data frame */ - rtw_write16(Adapter, REG_RXFLTMAP2, 0x00); - - /* disable update TSF */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4)); - } else { /* sitesurvey done */ - struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; - struct mlme_ext_info 
*pmlmeinfo = &pmlmeext->mlmext_info; - - if ((is_client_associated_to_ap(Adapter)) || - ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) { - /* enable to rx data frame */ - rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF); - - /* enable update TSF */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4))); - } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) { - rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF); - /* enable update TSF */ - rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4))); - } - rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN); - } - break; - case HW_VAR_SLOT_TIME: - { - u8 u1bAIFS, aSifsTime; - struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; - struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - - rtw_write8(Adapter, REG_SLOT, val[0]); - - if (pmlmeinfo->WMM_enable == 0) { - if (pmlmeext->cur_wireless_mode == WIRELESS_11B) - aSifsTime = 10; - else - aSifsTime = 16; - - u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime); - - /* <Roger_EXP> Temporary removed, 2008.06.20. */ - rtw_write8(Adapter, REG_EDCA_VO_PARAM, u1bAIFS); - rtw_write8(Adapter, REG_EDCA_VI_PARAM, u1bAIFS); - rtw_write8(Adapter, REG_EDCA_BE_PARAM, u1bAIFS); - rtw_write8(Adapter, REG_EDCA_BK_PARAM, u1bAIFS); - } - } - break; - case HW_VAR_DM_FLAG: - podmpriv->SupportAbility = *((u8 *)val); - break; - case HW_VAR_DM_FUNC_OP: - if (val[0]) - podmpriv->BK_SupportAbility = podmpriv->SupportAbility; - else - podmpriv->SupportAbility = podmpriv->BK_SupportAbility; - break; - case HW_VAR_DM_FUNC_RESET: - podmpriv->SupportAbility = pdmpriv->InitODMFlag; - break; - case HW_VAR_DM_FUNC_CLR: - podmpriv->SupportAbility = 0; - break; - case HW_VAR_AMPDU_FACTOR: - { - u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9}; - u8 FactorToSet; - u8 *pRegToSet; - u8 index = 0; - - pRegToSet = RegToSet_Normal; /* 0xb972a841; */ - FactorToSet = *((u8 *)val); - if (FactorToSet <= 3) { - FactorToSet = (1 << (FactorToSet + 2)); - if (FactorToSet > 0xf) - FactorToSet = 0xf; - - for (index = 0; index < 4; index++) { - if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4)) - pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4); - - if ((pRegToSet[index] & 0x0f) > FactorToSet) - pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet); - - rtw_write8(Adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]); - } - } - } - break; - case HW_VAR_H2C_MEDIA_STATUS_RPT: - rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val)); - break; - default: - break; - } + Hal_ReadPowerSavingMode88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + Hal_ReadTxPowerInfo88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + rtl8188e_EfuseParseChnlPlan(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + Hal_EfuseParseXtal_8188E(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + Hal_ReadAntennaDiversity88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + Hal_ReadThermalMeter_88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag); + ledpriv->bRegUseLed = true; + kfree(efuse_buf); } void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level) @@ -1190,6 +1043,8 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt) struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; u32 bcn_ctrl_reg = REG_BCN_CTRL; + int res; + u8 reg; /* reset TSF, enable update TSF, correcting TSF On Beacon */ /* BCN interval */ @@ -1200,7 +1055,10 @@ void SetBeaconRelatedRegisters8188EUsb(struct 
adapter *adapt) rtw_write8(adapt, REG_SLOT, 0x09); - value32 = rtw_read32(adapt, REG_TCR); + res = rtw_read32(adapt, REG_TCR, &value32); + if (res) + return; + value32 &= ~TSFRST; rtw_write32(adapt, REG_TCR, value32); @@ -1213,9 +1071,13 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt) _BeaconFunctionEnable(adapt, true, true); - ResumeTxBeacon(adapt); + rtw_resume_tx_beacon(adapt); + + res = rtw_read8(adapt, bcn_ctrl_reg, ®); + if (res) + return; - rtw_write8(adapt, bcn_ctrl_reg, rtw_read8(adapt, bcn_ctrl_reg) | BIT(1)); + rtw_write8(adapt, bcn_ctrl_reg, reg | BIT(1)); } void rtl8188eu_init_default_value(struct adapter *adapt) diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c index d5e674542a78..c1a4d023f627 100644 --- a/drivers/staging/r8188eu/hal/usb_ops_linux.c +++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c @@ -94,40 +94,47 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size) return status; } -u8 rtw_read8(struct adapter *adapter, u32 addr) +int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data) { struct io_priv *io_priv = &adapter->iopriv; struct intf_hdl *intf = &io_priv->intf; u16 value = addr & 0xffff; - u8 data; - usb_read(intf, value, &data, 1); - - return data; + return usb_read(intf, value, data, 1); } -u16 rtw_read16(struct adapter *adapter, u32 addr) +int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data) { struct io_priv *io_priv = &adapter->iopriv; struct intf_hdl *intf = &io_priv->intf; u16 value = addr & 0xffff; - __le16 data; + __le16 le_data; + int res; + + res = usb_read(intf, value, &le_data, 2); + if (res) + return res; - usb_read(intf, value, &data, 2); + *data = le16_to_cpu(le_data); - return le16_to_cpu(data); + return 0; } -u32 rtw_read32(struct adapter *adapter, u32 addr) +int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data) { struct io_priv *io_priv = &adapter->iopriv; struct intf_hdl *intf = &io_priv->intf; u16 value = addr & 0xffff; - __le32 data; + __le32 le_data; + int res; + + res = usb_read(intf, value, &le_data, 4); + if (res) + return res; - usb_read(intf, value, &data, 4); + *data = le32_to_cpu(le_data); - return le32_to_cpu(data); + return 0; } int rtw_write8(struct adapter *adapter, u32 addr, u8 val) diff --git a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h deleted file mode 100644 index e4c5b5d23cb4..000000000000 --- a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright(c) 2007 - 2011 Realtek Corporation. 
*/ - -#ifndef __HAL8188EPWRSEQ_H__ -#define __HAL8188EPWRSEQ_H__ - -#include "HalPwrSeqCmd.h" - -extern struct wl_pwr_cfg rtl8188E_power_on_flow[]; -extern struct wl_pwr_cfg rtl8188E_card_disable_flow[]; -extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[]; - -#endif /* __HAL8188EPWRSEQ_H__ */ diff --git a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h index 20d73ca781e8..c571ad9478ea 100644 --- a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h +++ b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h @@ -22,19 +22,6 @@ le32_to_cpu((*(__le32 *)(__rxstatusdesc + 16)) #define GET_TX_RPT2_DESC_MACID_VALID_2_88E(__rxstatusdesc) \ le32_to_cpu((*(__le32 *)(__rxstatusdesc + 20)) - -#define GET_TX_REPORT_TYPE1_RERTY_0(__paddr) \ - le16_get_bits(*(__le16 *)__paddr, GENMASK(15, 0)) -#define GET_TX_REPORT_TYPE1_RERTY_1(__paddr) \ - LE_BITS_TO_1BYTE(__paddr + 2, 0, 8) -#define GET_TX_REPORT_TYPE1_RERTY_2(__paddr) \ - LE_BITS_TO_1BYTE(__paddr + 3, 0, 8) -#define GET_TX_REPORT_TYPE1_RERTY_3(__paddr) \ - LE_BITS_TO_1BYTE(__paddr + 4, 0, 8) -#define GET_TX_REPORT_TYPE1_RERTY_4(__paddr) \ - LE_BITS_TO_1BYTE(__paddr + 5, 0, 8) -#define GET_TX_REPORT_TYPE1_DROP_0(__paddr) \ - LE_BITS_TO_1BYTE(__paddr + 6, 0, 8) /* End rate adaptive define */ int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm); diff --git a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h index 49c02cce569e..0886300d26bf 100644 --- a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h +++ b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h @@ -6,54 +6,13 @@ #include "drv_types.h" -/*---------------------------------------------*/ -/* 3 The value of cmd: 4 bits */ -/*---------------------------------------------*/ - -#define PWR_CMD_WRITE 0x01 - /* offset: the read register offset */ - /* msk: the mask of the write bits */ - /* value: write value */ - /* note: driver shall implement this cmd by read & msk after write */ - -#define PWR_CMD_POLLING 0x02 - /* offset: the read register offset */ - /* msk: the mask of the polled value */ - /* value: the value to be polled, masked by the msd field. */ - /* note: driver shall implement this cmd by */ - /* do{ */ - /* if ( (Read(offset) & msk) == (value & msk) ) */ - /* break; */ - /* } while (not timeout); */ - -#define PWR_CMD_DELAY 0x03 - /* offset: the value to delay */ - /* msk: N/A */ - /* value: the unit of delay, 0: us, 1: ms */ - -#define PWR_CMD_END 0x04 - /* offset: N/A */ - /* msk: N/A */ - /* value: N/A */ - -enum pwrseq_cmd_delat_unit { - PWRSEQ_DELAY_US, - PWRSEQ_DELAY_MS, -}; - -struct wl_pwr_cfg { - u16 offset; - u8 cmd:4; - u8 msk; - u8 value; +enum r8188eu_pwr_seq { + PWR_ON_FLOW, + DISABLE_FLOW, + LPS_ENTER_FLOW, }; -#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset -#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd -#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk -#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value - /* Prototype of protected function. 
*/ -u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg PwrCfgCmd[]); +u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq); #endif diff --git a/drivers/staging/r8188eu/include/basic_types.h b/drivers/staging/r8188eu/include/basic_types.h deleted file mode 100644 index ffb21170e898..000000000000 --- a/drivers/staging/r8188eu/include/basic_types.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright(c) 2007 - 2011 Realtek Corporation. */ - -#ifndef __BASIC_TYPES_H__ -#define __BASIC_TYPES_H__ - -#include <linux/types.h> -#define NDIS_OID uint - -typedef void (*proc_t)(void *); - -#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field) - -/* port from fw */ -/* TODO: Macros Below are Sync from SD7-Driver. It is necessary - * to check correctness */ - -/* - * Call endian free function when - * 1. Read/write packet content. - * 2. Before write integer to IO. - * 3. After read integer from IO. -*/ - -/* Convert little data endian to host ordering */ -#define EF1BYTE(_val) \ - ((u8)(_val)) - -/* Create a bit mask */ -#define BIT_LEN_MASK_8(__bitlen) \ - (0xFF >> (8 - (__bitlen))) - -/*Description: - * Return 4-byte value in host byte ordering from - * 4-byte pointer in little-endian system. - */ -#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ - (EF1BYTE(*((u8 *)(__pstart)))) - -/*Description: -Translate subfield (continuous bits in little-endian) of 4-byte -value to host byte ordering.*/ -#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \ - ( \ - (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \ - BIT_LEN_MASK_8(__bitlen) \ - ) - -#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \ - (__value) : (((__value + __aligment - 1) / __aligment) * __aligment)) - -#endif /* __BASIC_TYPES_H__ */ diff --git a/drivers/staging/r8188eu/include/hal_com.h b/drivers/staging/r8188eu/include/hal_com.h index 56ba356b5371..d7e333f6ce39 100644 --- a/drivers/staging/r8188eu/include/hal_com.h +++ b/drivers/staging/r8188eu/include/hal_com.h @@ -131,9 +131,6 @@ #define REG_NOA_DESC_START 0x05E8 #define REG_NOA_DESC_COUNT 0x05EC -#include "HalVerDef.h" -void dump_chip_info(struct HAL_VERSION ChipVersion); - /* return the final channel plan decision */ u8 hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan, diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h index a56f3d6ca399..ab6856d8a090 100644 --- a/drivers/staging/r8188eu/include/hal_intf.h +++ b/drivers/staging/r8188eu/include/hal_intf.h @@ -8,31 +8,15 @@ #include "drv_types.h" #include "Hal8188EPhyCfg.h" -enum hw_variables { - HW_VAR_SET_OPMODE, - HW_VAR_BASIC_RATE, - HW_VAR_CORRECT_TSF, - HW_VAR_MLME_SITESURVEY, - HW_VAR_SLOT_TIME, - HW_VAR_DM_FLAG, - HW_VAR_DM_FUNC_OP, - HW_VAR_DM_FUNC_RESET, - HW_VAR_DM_FUNC_CLR, - HW_VAR_AMPDU_FACTOR, - HW_VAR_H2C_MEDIA_STATUS_RPT, -}; - typedef s32 (*c2h_id_filter)(u8 id); -#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse) - void rtl8188eu_interface_configure(struct adapter *adapt); void ReadAdapterInfo8188EU(struct adapter *Adapter); void rtl8188eu_init_default_value(struct adapter *adapt); void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet); u32 rtl8188eu_InitPowerOn(struct adapter *adapt); void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState); -void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf); +void rtl8188e_ReadEFuse(struct adapter 
*Adapter, u16 _size_byte, u8 *pbuf); void hal_notch_filter_8188e(struct adapter *adapter, bool enable); @@ -44,8 +28,6 @@ int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, unsigned int rtl8188eu_inirp_init(struct adapter *Adapter); -void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val); - uint rtw_hal_init(struct adapter *padapter); uint rtw_hal_deinit(struct adapter *padapter); void rtw_hal_stop(struct adapter *padapter); diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h index 15636a808f52..e7a4f8af497a 100644 --- a/drivers/staging/r8188eu/include/ieee80211.h +++ b/drivers/staging/r8188eu/include/ieee80211.h @@ -624,13 +624,6 @@ enum _PUBLIC_ACTION { ACT_PUBLIC_MAX }; -/* BACK action code */ -enum rtw_ieee80211_back_actioncode { - RTW_WLAN_ACTION_ADDBA_REQ = 0, - RTW_WLAN_ACTION_ADDBA_RESP = 1, - RTW_WLAN_ACTION_DELBA = 2, -}; - #define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs) * 00:50:F2 */ #define WME_OUI_TYPE 2 diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h index f1a703643e74..72990a1cdc66 100644 --- a/drivers/staging/r8188eu/include/osdep_service.h +++ b/drivers/staging/r8188eu/include/osdep_service.h @@ -5,7 +5,6 @@ #define __OSDEP_SERVICE_H_ #include <linux/sched/signal.h> -#include "basic_types.h" #define _FAIL 0 #define _SUCCESS 1 @@ -77,8 +76,6 @@ void *rtw_malloc2d(int h, int w, int size); spin_lock_init(&((q)->lock)); \ } while (0) -void rtw_usleep_os(int us); - static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer) { return del_timer_sync(ptimer); diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h index d2a069d4e1cc..5cd62b216720 100644 --- a/drivers/staging/r8188eu/include/rtl8188e_hal.h +++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h @@ -26,11 +26,6 @@ #include "odm_RegConfig8188E.h" #include "odm_RTL8188E.h" -/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */ -#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow -#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow -#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188E_enter_lps_flow - #define DRVINFO_SZ 4 /* unit is 8bytes */ #define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 
1 : 0)) diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h index ef42c4b2f20c..9e7b1f89037c 100644 --- a/drivers/staging/r8188eu/include/rtl8188e_spec.h +++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h @@ -9,7 +9,6 @@ #define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */ #define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */ -#define MAC_ADDR_LEN 6 /* 8188E PKT_BUFF_ACCESS_CTRL value */ #define TXPKT_BUF_SELECT 0x69 #define RXPKT_BUF_SELECT 0xA5 @@ -427,12 +426,6 @@ #define MAX_MSS_DENSITY_2T 0x13 #define MAX_MSS_DENSITY_1T 0x0A -/* EEPROM enable when set 1 */ -#define CmdEEPROM_En BIT(5) -/* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */ -#define CmdEERPOMSEL BIT(4) -#define Cmd9346CR_9356SEL BIT(4) - /* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */ #define GPIOSEL_GPIO 0 #define GPIOSEL_ENBT BIT(5) @@ -1059,142 +1052,6 @@ Current IOREG MAP #define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */ #define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */ -/* RTL8188E SDIO Configuration */ - -/* I/O bus domain address mapping */ -#define SDIO_LOCAL_BASE 0x10250000 -#define WLAN_IOREG_BASE 0x10260000 -#define FIRMWARE_FIFO_BASE 0x10270000 -#define TX_HIQ_BASE 0x10310000 -#define TX_MIQ_BASE 0x10320000 -#define TX_LOQ_BASE 0x10330000 -#define RX_RX0FF_BASE 0x10340000 - -/* SDIO host local register space mapping. */ -#define SDIO_LOCAL_MSK 0x0FFF -#define WLAN_IOREG_MSK 0x7FFF -#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */ -#define WLAN_RX0FF_MSK 0x0003 - -/* Without ref to the SDIO Device ID */ -#define SDIO_WITHOUT_REF_DEVICE_ID 0 -#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */ -#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */ -#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */ -#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */ -#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */ -#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */ - -/* SDIO Tx Free Page Index */ -#define HI_QUEUE_IDX 0 -#define MID_QUEUE_IDX 1 -#define LOW_QUEUE_IDX 2 -#define PUBLIC_QUEUE_IDX 3 - -#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */ -#define SDIO_MAX_RX_QUEUE 1 - -/* SDIO Tx Control */ -#define SDIO_REG_TX_CTRL 0x0000 -/* SDIO Host Interrupt Mask */ -#define SDIO_REG_HIMR 0x0014 -/* SDIO Host Interrupt Service Routine */ -#define SDIO_REG_HISR 0x0018 -/* HCI Current Power Mode */ -#define SDIO_REG_HCPWM 0x0019 -/* RXDMA Request Length */ -#define SDIO_REG_RX0_REQ_LEN 0x001C -/* Free Tx Buffer Page */ -#define SDIO_REG_FREE_TXPG 0x0020 -/* HCI Current Power Mode 1 */ -#define SDIO_REG_HCPWM1 0x0024 -/* HCI Current Power Mode 2 */ -#define SDIO_REG_HCPWM2 0x0026 -/* HTSF Informaion */ -#define SDIO_REG_HTSFR_INFO 0x0030 -/* HCI Request Power Mode 1 */ -#define SDIO_REG_HRPWM1 0x0080 -/* HCI Request Power Mode 2 */ -#define SDIO_REG_HRPWM2 0x0082 -/* HCI Power Save Clock */ -#define SDIO_REG_HPS_CLKR 0x0084 -/* SDIO HCI Suspend Control */ -#define SDIO_REG_HSUS_CTRL 0x0086 -/* SDIO Host Extension Interrupt Mask Always */ -#define SDIO_REG_HIMR_ON 0x0090 -/* SDIO Host Extension Interrupt Status Always */ -#define SDIO_REG_HISR_ON 0x0091 - -#define SDIO_HIMR_DISABLED 0 - -/* RTL8188E SDIO Host Interrupt Mask Register */ -#define SDIO_HIMR_RX_REQUEST_MSK BIT(0) -#define SDIO_HIMR_AVAL_MSK BIT(1) -#define SDIO_HIMR_TXERR_MSK BIT(2) -#define SDIO_HIMR_RXERR_MSK BIT(3) -#define SDIO_HIMR_TXFOVW_MSK BIT(4) -#define SDIO_HIMR_RXFOVW_MSK 
BIT(5) -#define SDIO_HIMR_TXBCNOK_MSK BIT(6) -#define SDIO_HIMR_TXBCNERR_MSK BIT(7) -#define SDIO_HIMR_BCNERLY_INT_MSK BIT(16) -#define SDIO_HIMR_C2HCMD_MSK BIT(17) -#define SDIO_HIMR_CPWM1_MSK BIT(18) -#define SDIO_HIMR_CPWM2_MSK BIT(19) -#define SDIO_HIMR_HSISR_IND_MSK BIT(20) -#define SDIO_HIMR_GTINT3_IND_MSK BIT(21) -#define SDIO_HIMR_GTINT4_IND_MSK BIT(22) -#define SDIO_HIMR_PSTIMEOUT_MSK BIT(23) -#define SDIO_HIMR_OCPINT_MSK BIT(24) -#define SDIO_HIMR_ATIMEND_MSK BIT(25) -#define SDIO_HIMR_ATIMEND_E_MSK BIT(26) -#define SDIO_HIMR_CTWEND_MSK BIT(27) - -/* RTL8188E SDIO Specific */ -#define SDIO_HIMR_MCU_ERR_MSK BIT(28) -#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT(29) - -/* SDIO Host Interrupt Service Routine */ -#define SDIO_HISR_RX_REQUEST BIT(0) -#define SDIO_HISR_AVAL BIT(1) -#define SDIO_HISR_TXERR BIT(2) -#define SDIO_HISR_RXERR BIT(3) -#define SDIO_HISR_TXFOVW BIT(4) -#define SDIO_HISR_RXFOVW BIT(5) -#define SDIO_HISR_TXBCNOK BIT(6) -#define SDIO_HISR_TXBCNERR BIT(7) -#define SDIO_HISR_BCNERLY_INT BIT(16) -#define SDIO_HISR_C2HCMD BIT(17) -#define SDIO_HISR_CPWM1 BIT(18) -#define SDIO_HISR_CPWM2 BIT(19) -#define SDIO_HISR_HSISR_IND BIT(20) -#define SDIO_HISR_GTINT3_IND BIT(21) -#define SDIO_HISR_GTINT4_IND BIT(22) -#define SDIO_HISR_PSTIME BIT(23) -#define SDIO_HISR_OCPINT BIT(24) -#define SDIO_HISR_ATIMEND BIT(25) -#define SDIO_HISR_ATIMEND_E BIT(26) -#define SDIO_HISR_CTWEND BIT(27) - -/* RTL8188E SDIO Specific */ -#define SDIO_HISR_MCU_ERR BIT(28) -#define SDIO_HISR_TSF_BIT32_TOGGLE BIT(29) - -#define MASK_SDIO_HISR_CLEAR \ - (SDIO_HISR_TXERR | SDIO_HISR_RXERR | SDIO_HISR_TXFOVW |\ - SDIO_HISR_RXFOVW | SDIO_HISR_TXBCNOK | SDIO_HISR_TXBCNERR |\ - SDIO_HISR_C2HCMD | SDIO_HISR_CPWM1 | SDIO_HISR_CPWM2 |\ - SDIO_HISR_HSISR_IND | SDIO_HISR_GTINT3_IND | SDIO_HISR_GTINT4_IND |\ - SDIO_HISR_PSTIMEOUT | SDIO_HISR_OCPINT) - -/* SDIO HCI Suspend Control Register */ -#define HCI_RESUME_PWR_RDY BIT(1) -#define HCI_SUS_CTRL BIT(0) - -/* SDIO Tx FIFO related */ -/* The number of Tx FIFO free page */ -#define SDIO_TX_FREE_PG_QUEUE 4 -#define SDIO_TX_FIFO_PAGE_SZ 128 - /* 0xFE00h ~ 0xFE55h USB Configuration */ /* 2 USB Information (0xFE17) */ diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h index d8d48ace356c..94d735b1d0db 100644 --- a/drivers/staging/r8188eu/include/rtw_eeprom.h +++ b/drivers/staging/r8188eu/include/rtw_eeprom.h @@ -7,19 +7,9 @@ #include "osdep_service.h" #include "drv_types.h" -#define HWSET_MAX_SIZE_512 512 - struct eeprom_priv { u8 bautoload_fail_flag; u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */ - u8 EepromOrEfuse; - u8 efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4); }; -void eeprom_write16(struct adapter *padapter, u16 reg, u16 data); -u16 eeprom_read16(struct adapter *padapter, u16 reg); -void read_eeprom_content(struct adapter *padapter); -void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz); -void read_eeprom_content_by_attrib(struct adapter *padapter); - #endif /* __RTL871X_EEPROM_H__ */ diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h index 2daf69f554d5..3d688a0e6dfb 100644 --- a/drivers/staging/r8188eu/include/rtw_efuse.h +++ b/drivers/staging/r8188eu/include/rtw_efuse.h @@ -8,6 +8,4 @@ void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf); -void EFUSE_ShadowMapUpdate(struct adapter *adapter); - #endif diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h index 
6910e2b430e2..925c7967ac04 100644 --- a/drivers/staging/r8188eu/include/rtw_io.h +++ b/drivers/staging/r8188eu/include/rtw_io.h @@ -220,9 +220,9 @@ void unregister_intf_hdl(struct intf_hdl *pintfhdl); void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem); void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem); -u8 rtw_read8(struct adapter *adapter, u32 addr); -u16 rtw_read16(struct adapter *adapter, u32 addr); -u32 rtw_read32(struct adapter *adapter, u32 addr); +int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data); +int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data); +int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data); void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem); u32 rtw_read_port(struct adapter *adapter, u8 *pmem); void rtw_read_port_cancel(struct adapter *adapter); @@ -283,7 +283,7 @@ void free_io_queue(struct adapter *adapter); void async_bus_io(struct io_queue *pio_q); void bus_sync_io(struct io_queue *pio_q); u32 _ioreq2rwmem(struct io_queue *pio_q); -void dev_power_down(struct adapter * Adapter, u8 bpwrup); +void dev_power_down(struct adapter *Adapter, u8 bpwrup); #define PlatformEFIOWrite1Byte(_a,_b,_c) \ rtw_write8(_a,_b,_c) diff --git a/drivers/staging/r8188eu/include/rtw_iol.h b/drivers/staging/r8188eu/include/rtw_iol.h index fb88ebc1dabb..099f5a075274 100644 --- a/drivers/staging/r8188eu/include/rtw_iol.h +++ b/drivers/staging/r8188eu/include/rtw_iol.h @@ -41,22 +41,14 @@ int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame); void read_efuse_from_txpktbuf(struct adapter *adapter, int bcnhead, u8 *content, u16 *size); -int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, - u8 value, u8 mask); -int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, - u16 value, u16 mask); -int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, - u32 value, u32 mask); -int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, - u16 addr, u32 value, u32 mask); -#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \ - _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) ,(mask)) -#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \ - _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value),(mask)) -#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \ - _rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask)) -#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \ - _rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask)) +int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, + u8 value, u8 mask); +int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, + u16 value, u16 mask); +int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, + u32 value, u32 mask); +int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, + u16 addr, u32 value, u32 mask); u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame); diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h index 2c14cb23d9ad..d6b0c1c2f9a2 100644 --- a/drivers/staging/r8188eu/include/rtw_led.h +++ b/drivers/staging/r8188eu/include/rtw_led.h @@ -37,9 +37,11 @@ enum LED_STATE_871x { LED_BLINK_RUNTOP = 13, /* Customized for RunTop */ }; -struct LED_871x { +struct led_priv { struct adapter *padapter; + bool bRegUseLed; + enum LED_STATE_871x CurrLedState; /* Current LED state. 
*/ enum LED_STATE_871x BlinkingLedState; /* Next state for blinking, * either RTW_LED_ON or RTW_LED_OFF are. */ @@ -58,11 +60,6 @@ struct LED_871x { struct delayed_work blink_work; }; -struct led_priv{ - struct LED_871x SwLed0; - bool bRegUseLed; -}; - void rtl8188eu_InitSwLeds(struct adapter *padapter); void rtl8188eu_DeInitSwLeds(struct adapter *padapter); diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h index 573d65b175cc..343ce1ce4b3d 100644 --- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h +++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h @@ -424,6 +424,9 @@ void invalidate_cam_all(struct adapter *padapter); int allocate_fw_sta_entry(struct adapter *padapter); void flush_all_cam_entry(struct adapter *padapter); +void rtw_mlme_under_site_survey(struct adapter *adapter); +void rtw_mlme_site_survey_done(struct adapter *adapter); + void site_survey(struct adapter *padapter); u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid); @@ -455,6 +458,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len); void update_IOT_info(struct adapter *padapter); void update_capinfo(struct adapter *adapter, u16 updatecap); void update_wireless_mode(struct adapter *padapter); +void rtw_set_basic_rate(struct adapter *adapter, u8 *rates); void update_tx_basic_rate(struct adapter *padapter, u8 modulation); void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id); int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, @@ -468,8 +472,7 @@ unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps); void Update_RA_Entry(struct adapter *padapter, u32 mac_id); void set_sta_rate(struct adapter *padapter, struct sta_info *psta); -unsigned int receive_disconnect(struct adapter *padapter, - unsigned char *macaddr, unsigned short reason); +void receive_disconnect(struct adapter *padapter, unsigned char *macaddr, unsigned short reason); unsigned char get_highest_rate_idx(u32 mask); int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps); @@ -524,12 +527,13 @@ int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason); int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt, int wait_ms); -void issue_action_BA(struct adapter *padapter, unsigned char *raddr, - unsigned char action, unsigned short status); +void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status); unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr); unsigned int send_beacon(struct adapter *padapter); bool get_beacon_valid_bit(struct adapter *adapter); void clear_beacon_valid_bit(struct adapter *adapter); +void rtw_resume_tx_beacon(struct adapter *adapt); +void rtw_stop_tx_beacon(struct adapter *adapt); void start_clnt_assoc(struct adapter *padapter); void start_clnt_auth(struct adapter *padapter); @@ -544,12 +548,8 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame); unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame); -unsigned int DoReserved(struct adapter *padapter, - struct recv_frame *precv_frame); unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame); -unsigned int OnAtim(struct adapter *padapter, - struct recv_frame *precv_frame); unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame); unsigned int OnAuth(struct adapter *padapter, 
@@ -592,9 +592,6 @@ void addba_timer_hdl(struct sta_info *psta); bool cckrates_included(unsigned char *rate, int ratelen); bool cckratesonly_included(unsigned char *rate, int ratelen); -void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len); -void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext); - struct cmd_hdl { uint parmsize; u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf); diff --git a/drivers/staging/r8188eu/include/usb_ops_linux.h b/drivers/staging/r8188eu/include/usb_ops_linux.h index 641f059ffaf7..966688eedf66 100644 --- a/drivers/staging/r8188eu/include/usb_ops_linux.h +++ b/drivers/staging/r8188eu/include/usb_ops_linux.h @@ -26,6 +26,4 @@ #define usb_read_interrupt_complete(purb, regs) \ usb_read_interrupt_complete(purb) -unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr); - #endif diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 8dd280e2739a..7f91dac2e41b 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -687,12 +687,9 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a, enum ndis_802_11_network_infra networkType; int ret = 0; - - - if (_FAIL == rtw_pwr_wakeup(padapter)) { - ret = -EPERM; + ret = rtw_pwr_wakeup(padapter); + if (ret) goto exit; - } if (!padapter->hw_init_completed) { ret = -EPERM; @@ -931,12 +928,9 @@ static int rtw_wx_set_wap(struct net_device *dev, struct wlan_network *pnetwork = NULL; enum ndis_802_11_auth_mode authmode; - - - if (_FAIL == rtw_pwr_wakeup(padapter)) { - ret = -1; + ret = rtw_pwr_wakeup(padapter); + if (ret) goto exit; - } if (!padapter->bup) { ret = -1; @@ -1049,10 +1043,9 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT]; struct wifidirect_info *pwdinfo = &padapter->wdinfo; - if (_FAIL == rtw_pwr_wakeup(padapter)) { - ret = -1; + ret = rtw_pwr_wakeup(padapter); + if (ret) goto exit; - } if (padapter->bDriverStopped) { ret = -1; @@ -1252,10 +1245,9 @@ static int rtw_wx_set_essid(struct net_device *dev, uint ret = 0, len; - if (_FAIL == rtw_pwr_wakeup(padapter)) { - ret = -1; + ret = rtw_pwr_wakeup(padapter); + if (ret) goto exit; - } if (!padapter->bup) { ret = -1; @@ -1593,7 +1585,7 @@ static int rtw_wx_set_enc(struct net_device *dev, if (erq->length > 0) { wep.KeyLength = erq->length <= 5 ? 
5 : 13; - wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep.Length = wep.KeyLength + offsetof(struct ndis_802_11_wep, KeyMaterial); } else { wep.KeyLength = 0; @@ -3126,18 +3118,29 @@ exit: static void mac_reg_dump(struct adapter *padapter) { int i, j = 1; + u32 reg; + int res; + pr_info("\n ======= MAC REG =======\n"); for (i = 0x0; i < 0x300; i += 4) { if (j % 4 == 1) pr_info("0x%02x", i); - pr_info(" 0x%08x ", rtw_read32(padapter, i)); + + res = rtw_read32(padapter, i, &reg); + if (!res) + pr_info(" 0x%08x ", reg); + if ((j++) % 4 == 0) pr_info("\n"); } for (i = 0x400; i < 0x800; i += 4) { if (j % 4 == 1) pr_info("0x%02x", i); - pr_info(" 0x%08x ", rtw_read32(padapter, i)); + + res = rtw_read32(padapter, i, &reg); + if (!res) + pr_info(" 0x%08x ", reg); + if ((j++) % 4 == 0) pr_info("\n"); } @@ -3145,13 +3148,18 @@ static void mac_reg_dump(struct adapter *padapter) static void bb_reg_dump(struct adapter *padapter) { - int i, j = 1; + int i, j = 1, res; + u32 reg; + pr_info("\n ======= BB REG =======\n"); for (i = 0x800; i < 0x1000; i += 4) { if (j % 4 == 1) pr_info("0x%02x", i); - pr_info(" 0x%08x ", rtw_read32(padapter, i)); + res = rtw_read32(padapter, i, &reg); + if (!res) + pr_info(" 0x%08x ", reg); + if ((j++) % 4 == 0) pr_info("\n"); } @@ -3178,6 +3186,7 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func) { struct hal_data_8188e *haldata = &adapter->haldata; struct odm_dm_struct *odmpriv = &haldata->odmpriv; + int res; switch (dm_func) { case 0: @@ -3193,7 +3202,9 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func) if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) { struct rtw_dig *digtable = &odmpriv->DM_DigTable; - digtable->CurIGValue = rtw_read8(adapter, 0xc50); + res = rtw_read8(adapter, 0xc50, &digtable->CurIGValue); + (void)res; + /* FIXME: return an error to caller */ } odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE; break; @@ -3202,6 +3213,14 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func) } } +static void rtw_set_dm_func_flag(struct adapter *adapter, u32 odm_flag) +{ + struct hal_data_8188e *haldata = &adapter->haldata; + struct odm_dm_struct *odmpriv = &haldata->odmpriv; + + odmpriv->SupportAbility = odm_flag; +} + static int rtw_dbg_port(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) @@ -3329,8 +3348,9 @@ static int rtw_dbg_port(struct net_device *dev, u16 reg = arg; u16 start_value = 0; u32 write_num = extra_arg; - int i; + int i, res; struct xmit_frame *xmit_frame; + u8 val8; xmit_frame = rtw_IOL_accquire_xmit_frame(padapter); if (!xmit_frame) { @@ -3343,7 +3363,9 @@ static int rtw_dbg_port(struct net_device *dev, if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS) ret = -EPERM; - rtw_read8(padapter, reg); + /* FIXME: is this read necessary? */ + res = rtw_read8(padapter, reg, &val8); + (void)res; } break; @@ -3352,8 +3374,8 @@ static int rtw_dbg_port(struct net_device *dev, u16 reg = arg; u16 start_value = 200; u32 write_num = extra_arg; - - int i; + u16 val16; + int i, res; struct xmit_frame *xmit_frame; xmit_frame = rtw_IOL_accquire_xmit_frame(padapter); @@ -3367,7 +3389,9 @@ static int rtw_dbg_port(struct net_device *dev, if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS) ret = -EPERM; - rtw_read16(padapter, reg); + /* FIXME: is this read necessary?
*/ + res = rtw_read16(padapter, reg, &val16); + (void)res; } break; case 0x08: /* continuous write dword test */ @@ -3390,7 +3414,8 @@ static int rtw_dbg_port(struct net_device *dev, if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS) ret = -EPERM; - rtw_read32(padapter, reg); + /* FIXME: is this read necessary? */ + ret = rtw_read32(padapter, reg, &write_num); } break; } @@ -3434,7 +3459,7 @@ static int rtw_dbg_port(struct net_device *dev, case 0x06: { u32 ODMFlag = (u32)(0x0f & arg); - SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag)); + rtw_set_dm_func_flag(padapter, ODMFlag); } break; case 0x07: diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c index 891c85b088ca..cac9553666e6 100644 --- a/drivers/staging/r8188eu/os_dep/os_intfs.c +++ b/drivers/staging/r8188eu/os_dep/os_intfs.c @@ -740,19 +740,32 @@ static void rtw_fifo_cleanup(struct adapter *adapter) { struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv; u8 trycnt = 100; + int res; + u32 reg; /* pause tx */ rtw_write8(adapter, REG_TXPAUSE, 0xff); /* keep sn */ - adapter->xmitpriv.nqos_ssn = rtw_read16(adapter, REG_NQOS_SEQ); + /* FIXME: return an error to caller */ + res = rtw_read16(adapter, REG_NQOS_SEQ, &adapter->xmitpriv.nqos_ssn); + if (res) + return; if (!pwrpriv->bkeepfwalive) { /* RX DMA stop */ + res = rtw_read32(adapter, REG_RXPKT_NUM, &reg); + if (res) + return; + rtw_write32(adapter, REG_RXPKT_NUM, - (rtw_read32(adapter, REG_RXPKT_NUM) | RW_RELEASE_EN)); + (reg | RW_RELEASE_EN)); do { - if (!(rtw_read32(adapter, REG_RXPKT_NUM) & RXDMA_IDLE)) + res = rtw_read32(adapter, REG_RXPKT_NUM, &reg); + if (res) + continue; + + if (!(reg & RXDMA_IDLE)) break; } while (trycnt--); diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c index 812acd59be79..3504a0a9ba87 100644 --- a/drivers/staging/r8188eu/os_dep/osdep_service.c +++ b/drivers/staging/r8188eu/os_dep/osdep_service.c @@ -42,14 +42,6 @@ Otherwise, there will be racing condition.
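[Editor's note] The r8188eu hunks above all apply the same conversion: register reads that used to return the value directly now return an error code and hand the value back through a pointer, so a failed USB control transfer is no longer silently treated as register data. A minimal sketch of a converted caller, assuming only the three-argument rtw_read32() shown in the hunks (the helper name is illustrative, not from the driver):

	/* Sketch: read one register with the error-returning API above. */
	static int dump_one_mac_reg(struct adapter *adapter, u32 addr)
	{
		u32 val;
		int res;

		res = rtw_read32(adapter, addr, &val);
		if (res)
			return res;	/* propagate the I/O error instead of printing junk */

		pr_info("0x%03x: 0x%08x\n", addr, val);
		return 0;
	}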
Caller must check if the list is empty before calling rtw_list_delete */ -void rtw_usleep_os(int us) -{ - if (1 < (us / 1000)) - msleep(1); - else - msleep((us / 1000) + 1); -} - static const struct device_type wlan_type = { .name = "wlan", }; diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c index 68869c5daeff..cc2b44f60c46 100644 --- a/drivers/staging/r8188eu/os_dep/usb_intf.c +++ b/drivers/staging/r8188eu/os_dep/usb_intf.c @@ -372,7 +372,7 @@ handle_dualmac: free_adapter: if (pnetdev) rtw_free_netdev(pnetdev); - else if (padapter) + else vfree(padapter); return NULL; diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c index 0269e602b217..220e592b757c 100644 --- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c +++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c @@ -7,7 +7,7 @@ #include "../include/usb_ops_linux.h" #include "../include/rtl8188e_recv.h" -unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr) +static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr) { unsigned int pipe = 0, ep_num = 0; struct usb_device *pusbd = pdvobj->pusbdev; diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c index e430c64e9068..91a1e4e3219a 100644 --- a/drivers/staging/r8188eu/os_dep/xmit_linux.c +++ b/drivers/staging/r8188eu/os_dep/xmit_linux.c @@ -71,7 +71,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb if (!pxmitbuf->pallocated_buf) return _FAIL; - pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ); + pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ); pxmitbuf->dma_transfer_addr = 0; pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c index 37715afb0210..42f81b23a144 100644 --- a/drivers/staging/rtl8192e/rtllib_tx.c +++ b/drivers/staging/rtl8192e/rtllib_tx.c @@ -205,30 +205,28 @@ static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size, struct rtllib_txb *txb; int i; - txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags), - gfp_mask); + txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask); if (!txb) return NULL; - memset(txb, 0, sizeof(struct rtllib_txb)); txb->nr_frags = nr_frags; txb->frag_size = cpu_to_le16(txb_size); for (i = 0; i < nr_frags; i++) { txb->fragments[i] = dev_alloc_skb(txb_size); - if (unlikely(!txb->fragments[i])) { - i--; - break; - } + if (unlikely(!txb->fragments[i])) + goto err_free; memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); } - if (unlikely(i != nr_frags)) { - while (i >= 0) - dev_kfree_skb_any(txb->fragments[i--]); - kfree(txb); - return NULL; - } + return txb; + +err_free: + while (--i >= 0) + dev_kfree_skb_any(txb->fragments[i]); + kfree(txb); + + return NULL; } static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu) diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c index cf9a240924f2..da2c41c9b92f 100644 --- a/drivers/staging/rtl8192e/rtllib_wx.c +++ b/drivers/staging/rtl8192e/rtllib_wx.c @@ -17,17 +17,9 @@ #include <linux/module.h> #include <linux/etherdevice.h> #include "rtllib.h" -struct modes_unit { - char *mode_string; - int mode_size; -}; -static struct modes_unit rtllib_modes[] = { - {"a", 1}, - {"b", 1}, - {"g", 1}, - {"?", 1}, - {"N-24G", 5}, - {"N-5G", 4}, + +static const char * 
const rtllib_modes[] = { + "a", "b", "g", "?", "N-24G", "N-5G" }; #define MAX_CUSTOM_LEN 64 @@ -72,10 +64,9 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, /* Add the protocol name */ iwe.cmd = SIOCGIWNAME; for (i = 0; i < ARRAY_SIZE(rtllib_modes); i++) { - if (network->mode&(1<<i)) { - sprintf(pname, rtllib_modes[i].mode_string, - rtllib_modes[i].mode_size); - pname += rtllib_modes[i].mode_size; + if (network->mode & BIT(i)) { + strcpy(pname, rtllib_modes[i]); + pname += strlen(rtllib_modes[i]); } } *pname = '\0'; @@ -158,7 +149,8 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, max_rate = rate; } iwe.cmd = SIOCGIWRATE; - iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + iwe.u.bitrate.disabled = 0; + iwe.u.bitrate.fixed = 0; iwe.u.bitrate.value = max_rate * 500000; start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_PARAM_LEN); iwe.cmd = IWEVCUSTOM; @@ -285,7 +277,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { - struct iw_point *erq = &(wrqu->encoding); + struct iw_point *erq = &wrqu->encoding; struct net_device *dev = ieee->dev; struct rtllib_security sec = { .flags = 0 @@ -312,8 +304,9 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee, netdev_dbg(ieee->dev, "Disabling encryption on key %d.\n", key); lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt); - } else + } else { netdev_dbg(ieee->dev, "Disabling encryption.\n"); + } /* Check all the keys to see if any are still configured, * and if no key index was provided, de-init them all @@ -457,7 +450,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { - struct iw_point *erq = &(wrqu->encoding); + struct iw_point *erq = &wrqu->encoding; int len, key; struct lib80211_crypt_data *crypt; @@ -608,7 +601,6 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee, goto done; } *crypt = new_crypt; - } if (ext->key_len > 0 && (*crypt)->ops->set_key && @@ -732,8 +724,9 @@ int rtllib_wx_set_auth(struct rtllib_device *ieee, } else if (data->value & IW_AUTH_ALG_LEAP) { ieee->open_wep = 1; ieee->auth_mode = 2; - } else + } else { return -EINVAL; + } break; case IW_AUTH_WPA_ENABLED: @@ -776,7 +769,7 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len) kfree(ieee->wps_ie); ieee->wps_ie = NULL; if (len) { - if (len != ie[1]+2) + if (len != ie[1] + 2) return -EINVAL; buf = kmemdup(ie, len, GFP_KERNEL); if (!buf) diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h index 14ca00a2789b..1942cb849374 100644 --- a/drivers/staging/rtl8192u/r8192U.h +++ b/drivers/staging/rtl8192u/r8192U.h @@ -1013,7 +1013,7 @@ typedef struct r8192_priv { bool bis_any_nonbepkts; bool bcurrent_turbo_EDCA; bool bis_cur_rdlstate; - struct timer_list fsync_timer; + struct delayed_work fsync_work; bool bfsync_processing; /* 500ms Fsync timer is active or not */ u32 rate_record; u32 rateCountDiffRecord; diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c index 725bf5ca9e34..00fc8fd344db 100644 --- a/drivers/staging/rtl8192u/r8192U_dm.c +++ b/drivers/staging/rtl8192u/r8192U_dm.c @@ -2578,19 +2578,20 @@ static void dm_init_fsync(struct net_device *dev) priv->ieee80211->fsync_seconddiff_ratethreshold = 200; priv->ieee80211->fsync_state = Default_Fsync; priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */ - timer_setup(&priv->fsync_timer, 
dm_fsync_timer_callback, 0); + INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback); } static void dm_deInit_fsync(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); - del_timer_sync(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); } -void dm_fsync_timer_callback(struct timer_list *t) +void dm_fsync_work_callback(struct work_struct *work) { - struct r8192_priv *priv = from_timer(priv, t, fsync_timer); + struct r8192_priv *priv = + container_of(work, struct r8192_priv, fsync_work.work); struct net_device *dev = priv->ieee80211->dev; u32 rate_index, rate_count = 0, rate_count_diff = 0; bool bSwitchFromCountDiff = false; @@ -2657,17 +2658,16 @@ void dm_fsync_timer_callback(struct timer_list *t) } } if (bDoubleTimeInterval) { - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + msecs_to_jiffies(priv + ->ieee80211->fsync_time_interval * + priv->ieee80211->fsync_multiple_timeinterval)); } else { - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + msecs_to_jiffies(priv + ->ieee80211->fsync_time_interval)); } } else { /* Let Register return to default value; */ @@ -2695,7 +2695,7 @@ static void dm_EndSWFsync(struct net_device *dev) struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __func__); - del_timer_sync(&(priv->fsync_timer)); + cancel_delayed_work_sync(&priv->fsync_work); /* Let Register return to default value; */ if (priv->bswitch_fsync) { @@ -2736,11 +2736,9 @@ static void dm_StartSWFsync(struct net_device *dev) if (priv->ieee80211->fsync_rate_bitmap & rateBitmap) priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex]; } - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + msecs_to_jiffies(priv->ieee80211->fsync_time_interval)); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd); } @@ -3002,7 +3000,7 @@ static void dm_check_txrateandretrycount(struct net_device *dev) /* for initial tx rate */ /*priv->stats.last_packet_rate = read_nic_byte(dev, INITIAL_TX_RATE_REG);*/ read_nic_byte(dev, INITIAL_TX_RATE_REG, &ieee->softmac_stats.last_packet_rate); - /* for tx tx retry count */ + /* for tx retry count */ /*priv->stats.txretrycount = read_nic_dword(dev, TX_RETRY_COUNT_REG);*/ read_nic_dword(dev, TX_RETRY_COUNT_REG, &ieee->softmac_stats.txretrycount); } diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h index 0b2a1c688597..2159018b4e38 100644 --- a/drivers/staging/rtl8192u/r8192U_dm.h +++ b/drivers/staging/rtl8192u/r8192U_dm.h @@ -166,7 +166,7 @@ void dm_force_tx_fw_info(struct net_device *dev, void dm_init_edca_turbo(struct net_device *dev); void dm_rf_operation_test_callback(unsigned long data); void dm_rf_pathcheck_workitemcallback(struct work_struct *work); -void 
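[Editor's note] The rtl8192u change above replaces a self-rearming timer with a delayed work item: the callback recovers its private data with container_of() on the embedded work, and re-queues itself with schedule_delayed_work() instead of add_timer(). A minimal sketch of the pattern, using illustrative names rather than the driver's own:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct demo_priv {
		struct delayed_work poll_work;	/* replaces struct timer_list */
		unsigned int interval_ms;
	};

	static void demo_poll_work(struct work_struct *work)
	{
		struct demo_priv *priv =
			container_of(work, struct demo_priv, poll_work.work);

		/* ... periodic work formerly done in the timer callback ... */

		/* re-arm, as the old add_timer() path did */
		schedule_delayed_work(&priv->poll_work,
				      msecs_to_jiffies(priv->interval_ms));
	}

	static void demo_init(struct demo_priv *priv)
	{
		priv->interval_ms = 500;
		INIT_DELAYED_WORK(&priv->poll_work, demo_poll_work);
		schedule_delayed_work(&priv->poll_work,
				      msecs_to_jiffies(priv->interval_ms));
	}

	static void demo_deinit(struct demo_priv *priv)
	{
		cancel_delayed_work_sync(&priv->poll_work);	/* replaces del_timer_sync() */
	}

One benefit visible in the hunks above: teardown is a single cancel_delayed_work_sync() call even if the callback has just re-queued itself.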
dm_fsync_timer_callback(struct timer_list *t); +void dm_fsync_work_callback(struct work_struct *work); void dm_cck_txpower_adjust(struct net_device *dev, bool binch14); void dm_shadow_init(struct net_device *dev); void dm_initialize_txpower_tracking(struct net_device *dev); diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 1bdbd0971f73..f878b04076d8 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -960,7 +960,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame) return _FAIL; frame_type = GetFrameSubType(pframe); - if (frame_type == WIFI_ASSOCREQ) + if (frame_type == WIFI_ASSOCREQ) ie_offset = _ASOCREQ_IE_OFFSET_; else /* WIFI_REASSOCREQ */ ie_offset = _REASOCREQ_IE_OFFSET_; diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 43b5604c0bca..cb6d287f580d 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -366,9 +366,8 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter) int freq = (int)cur_network->network.configuration.ds_config; struct ieee80211_channel *chan; - if (pwdev->iftype != NL80211_IFTYPE_ADHOC) { + if (pwdev->iftype != NL80211_IFTYPE_ADHOC) return; - } if (!rtw_cfg80211_check_bss(padapter)) { struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network); @@ -450,8 +449,8 @@ check_bss: notify_channel = ieee80211_get_channel(wiphy, freq); - roam_info.channel = notify_channel; - roam_info.bssid = cur_network->network.mac_address; + roam_info.links[0].channel = notify_channel; + roam_info.links[0].bssid = cur_network->network.mac_address; roam_info.req_ie = pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2; roam_info.req_ie_len = @@ -544,9 +543,8 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa goto exit; } - if (wep_key_len > 0) { + if (wep_key_len > 0) wep_key_len = wep_key_len <= 5 ? 5 : 13; - } if (psecuritypriv->bWepDefaultKeyIdxSet == 0) { /* wep default key has not been set, so use this key index as default key. */ @@ -582,9 +580,8 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; - if (param->u.crypt.key_len == 13) { + if (param->u.crypt.key_len == 13) psecuritypriv->dot118021XGrpPrivacy = _WEP104_; - } } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _TKIP_; @@ -626,24 +623,16 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa } - if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) /* psk/802_1x */ - { - if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) - { - if (param->u.crypt.set_tx == 1) /* pairwise key */ - { + if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */ + if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { + if (param->u.crypt.set_tx == 1) { /* pairwise key */ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); - if (strcmp(param->u.crypt.alg, "WEP") == 0) - { + if (strcmp(param->u.crypt.alg, "WEP") == 0) { psta->dot118021XPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) - { psta->dot118021XPrivacy = _WEP104_; - } - } - else if (strcmp(param->u.crypt.alg, "TKIP") == 0) - { + } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { psta->dot118021XPrivacy = _TKIP_; /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */ @@ -653,14 +642,10 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa psecuritypriv->busetkipkey = true; - } - else if (strcmp(param->u.crypt.alg, "CCMP") == 0) - { + } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) { psta->dot118021XPrivacy = _AES_; - } - else - { + } else { psta->dot118021XPrivacy = _NO_PRIVACY_; } @@ -670,21 +655,14 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa psta->bpairwise_key_installed = true; - } - else/* group key??? */ - { - if (strcmp(param->u.crypt.alg, "WEP") == 0) - { + } else { /* group key??? */ + if (strcmp(param->u.crypt.alg, "WEP") == 0) { memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) - { psecuritypriv->dot118021XGrpPrivacy = _WEP104_; - } - } - else if (strcmp(param->u.crypt.alg, "TKIP") == 0) - { + } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _TKIP_; memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); @@ -696,15 +674,11 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa psecuritypriv->busetkipkey = true; - } - else if (strcmp(param->u.crypt.alg, "CCMP") == 0) - { + } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _AES_; memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); - } - else - { + } else { psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; } @@ -717,8 +691,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); pbcmc_sta = rtw_get_bcmc_stainfo(padapter); - if (pbcmc_sta) - { + if (pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = false; pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */ } @@ -746,20 +719,16 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param param->u.crypt.err = 0; param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0'; - if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) - { + if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) { ret = -EINVAL; goto exit; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && - param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) - { + param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { if (param->u.crypt.idx >= WEP_KEYS - || param->u.crypt.idx >= BIP_MAX_KEYID - ) - { + || param->u.crypt.idx >= BIP_MAX_KEYID) { ret = -EINVAL; goto exit; } @@ -770,19 +739,16 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param } } - if (strcmp(param->u.crypt.alg, "WEP") == 0) - { + if (strcmp(param->u.crypt.alg, "WEP") == 0) { wep_key_idx = param->u.crypt.idx; wep_key_len = param->u.crypt.key_len; - if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) - { + if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) { ret = -EINVAL; goto exit; } - if (psecuritypriv->bWepDefaultKeyIdxSet == 0) - { + if (psecuritypriv->bWepDefaultKeyIdxSet == 0) { /* wep default key has not been set, so use this key index as default key. */ wep_key_len = wep_key_len <= 5 ? 
5 : 13; @@ -791,8 +757,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param psecuritypriv->dot11PrivacyAlgrthm = _WEP40_; psecuritypriv->dot118021XGrpPrivacy = _WEP40_; - if (wep_key_len == 13) - { + if (wep_key_len == 13) { psecuritypriv->dot11PrivacyAlgrthm = _WEP104_; psecuritypriv->dot118021XGrpPrivacy = _WEP104_; } @@ -809,13 +774,11 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param goto exit; } - if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */ - { + if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */ struct sta_info *psta, *pbcmc_sta; struct sta_priv *pstapriv = &padapter->stapriv; - if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) /* sta mode */ - { + if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) { /* sta mode */ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv)); if (psta) { /* Jeff: don't disable ieee8021x_blocked while clearing key */ @@ -824,18 +787,15 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || - (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) - { + (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } - if (param->u.crypt.set_tx == 1)/* pairwise key */ - { + if (param->u.crypt.set_tx == 1) { /* pairwise key */ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); - if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */ - { + if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */ /* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8); memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8); @@ -845,11 +805,8 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param } rtw_setstakey_cmd(padapter, psta, true, true); - } - else/* group key */ - { - if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) - { + } else { /* group key */ + if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) { memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8); memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8); @@ -857,9 +814,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx; rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true); - } - else if (strcmp(param->u.crypt.alg, "BIP") == 0) - { + } else if (strcmp(param->u.crypt.alg, "BIP") == 0) { /* save the IGTK key, length 16 bytes */ memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); /* @@ -873,25 +828,19 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param } pbcmc_sta = rtw_get_bcmc_stainfo(padapter); - if (!pbcmc_sta) - { + if (!pbcmc_sta) { /* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */ - } - else - { + } else { /* Jeff: don't disable ieee8021x_blocked while clearing key */ if (strcmp(param->u.crypt.alg, "none") != 0) pbcmc_sta->ieee8021x_blocked = false; if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || - (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) - { + (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } } - } - else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) /* adhoc mode */ - { + } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* adhoc mode */ } } @@ -949,39 +898,29 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev, if (!mac_addr || is_broadcast_ether_addr(mac_addr)) - { param->u.crypt.set_tx = 0; /* for wpa/wpa2 group key */ - } else { + else param->u.crypt.set_tx = 1; /* for wpa/wpa2 pairwise key */ - } param->u.crypt.idx = key_index; if (params->seq_len && params->seq) - { memcpy(param->u.crypt.seq, (u8 *)params->seq, params->seq_len); - } - if (params->key_len && params->key) - { + if (params->key_len && params->key) { param->u.crypt.key_len = params->key_len; memcpy(param->u.crypt.key, (u8 *)params->key, params->key_len); } - if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) - { + if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { ret = rtw_cfg80211_set_encryption(ndev, param, param_len); - } - else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) - { + } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { if (mac_addr) memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN); ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len); - } - else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true - || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) - { + } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true + || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { ret = rtw_cfg80211_set_encryption(ndev, param, param_len); } @@ -1007,8 +946,7 @@ static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev, struct adapter *padapter = rtw_netdev_priv(ndev); struct security_priv *psecuritypriv = &padapter->securitypriv; - if (key_index == psecuritypriv->dot11PrivacyKeyIndex) - { + if (key_index == psecuritypriv->dot11PrivacyKeyIndex) { /* clear the flag of wep default key set. 
*/ psecuritypriv->bWepDefaultKeyIdxSet = 0; } @@ -1024,16 +962,14 @@ static int cfg80211_rtw_set_default_key(struct wiphy *wiphy, struct adapter *padapter = rtw_netdev_priv(ndev); struct security_priv *psecuritypriv = &padapter->securitypriv; - if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) /* set wep default key */ - { + if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) { /* set wep default key */ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled; psecuritypriv->dot11PrivacyKeyIndex = key_index; psecuritypriv->dot11PrivacyAlgrthm = _WEP40_; psecuritypriv->dot118021XGrpPrivacy = _WEP40_; - if (psecuritypriv->dot11DefKeylen[key_index] == 13) - { + if (psecuritypriv->dot11DefKeylen[key_index] == 13) { psecuritypriv->dot11PrivacyAlgrthm = _WEP104_; psecuritypriv->dot118021XGrpPrivacy = _WEP104_; } @@ -1071,9 +1007,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy, /* for infra./P2PClient mode */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) - && check_fwstate(pmlmepriv, _FW_LINKED) - ) - { + && check_fwstate(pmlmepriv, _FW_LINKED)) { struct wlan_network *cur_network = &(pmlmepriv->cur_network); if (memcmp((u8 *)mac, cur_network->network.mac_address, ETH_ALEN)) { @@ -1099,9 +1033,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy, if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || check_fwstate(pmlmepriv, WIFI_AP_STATE)) - && check_fwstate(pmlmepriv, _FW_LINKED) - ) - { + && check_fwstate(pmlmepriv, _FW_LINKED)) { /* TODO: should acquire station info... */ } @@ -1121,8 +1053,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy, struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); int ret = 0; - if (adapter_to_dvobj(padapter)->processing_dev_remove == true) - { + if (adapter_to_dvobj(padapter)->processing_dev_remove == true) { ret = -EPERM; goto exit; } @@ -1141,8 +1072,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy, old_type = rtw_wdev->iftype; - if (old_type != type) - { + if (old_type != type) { pmlmeext->action_public_rxseq = 0xffff; pmlmeext->action_public_dialog_token = 0xff; } @@ -1164,8 +1094,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy, rtw_wdev->iftype = type; - if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) - { + if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) { rtw_wdev->iftype = old_type; ret = -EPERM; goto exit; @@ -1230,9 +1159,7 @@ void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter) /* report network only if the current channel set contains the channel to which this network belongs */ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.configuration.ds_config) >= 0 - && true == rtw_validate_ssid(&(pnetwork->network.ssid)) - ) - { + && true == rtw_validate_ssid(&(pnetwork->network.ssid))) { /* ev =translate_scan(padapter, a, pnetwork, ev, stop); */ rtw_cfg80211_inform_bss(padapter, pnetwork); } @@ -1249,13 +1176,10 @@ static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *b u8 *wps_ie; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); - if (len > 0) - { + if (len > 0) { wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen); - if (wps_ie) - { - if (pmlmepriv->wps_probe_req_ie) - { + if (wps_ie) { + if (pmlmepriv->wps_probe_req_ie) { pmlmepriv->wps_probe_req_ie_len 
= 0; kfree(pmlmepriv->wps_probe_req_ie); pmlmepriv->wps_probe_req_ie = NULL; @@ -1307,10 +1231,8 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy pwdev_priv->scan_request = request; spin_unlock_bh(&pwdev_priv->scan_req_lock); - if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) - { - if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) - { + if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { + if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) { need_indicate_scan_done = true; goto check_need_indicate_scan_done; } @@ -1333,15 +1255,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy goto check_need_indicate_scan_done; } - if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) - { - static unsigned long lastscantime = 0; + if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) { + static unsigned long lastscantime; unsigned long passtime; passtime = jiffies_to_msecs(jiffies - lastscantime); lastscantime = jiffies; - if (passtime > 12000) - { + if (passtime > 12000) { need_indicate_scan_done = true; goto check_need_indicate_scan_done; } @@ -1380,9 +1300,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy } else if (request->n_channels <= 4) { for (j = request->n_channels - 1; j >= 0; j--) for (i = 0; i < survey_times; i++) - { memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel)); - } _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * request->n_channels); } else { _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0); @@ -1391,14 +1309,11 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy if (_status == false) - { ret = -1; - } check_need_indicate_scan_done: kfree(ssid); - if (need_indicate_scan_done) - { + if (need_indicate_scan_done) { rtw_cfg80211_surveydone_event_callback(padapter); rtw_cfg80211_indicate_scan_done(padapter, false); } @@ -1424,9 +1339,7 @@ static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32 if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) - { psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK; - } return 0; @@ -1585,8 +1498,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel if (pairwise_cipher == 0) pairwise_cipher = WPA_CIPHER_NONE; - switch (group_cipher) - { + switch (group_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; @@ -1609,8 +1521,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel break; } - switch (pairwise_cipher) - { + switch (pairwise_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; @@ -1731,8 +1642,7 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) rtw_wdev->iftype = NL80211_IFTYPE_STATION; - if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false) - { + if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false) { rtw_wdev->iftype = old_type; ret = -EPERM; goto leave_ibss; @@ -1792,9 +1702,8 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev, ret = -EBUSY; goto exit; } - if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { + if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) rtw_scan_abort(padapter); - } 
psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled; psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_; @@ -2086,6 +1995,7 @@ static u8 rtw_get_chan_type(struct adapter *adapter) } static int cfg80211_rtw_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + unsigned int link_id, struct cfg80211_chan_def *chandef) { struct adapter *adapter = wiphy_to_adapter(wiphy); @@ -2287,9 +2197,8 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str mon_ndev->ieee80211_ptr = mon_wdev; ret = cfg80211_register_netdevice(mon_ndev); - if (ret) { + if (ret) goto out; - } *ndev = pwdev_priv->pmon_ndev = mon_ndev; memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ+1); @@ -2402,11 +2311,10 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, WLAN_EID_VENDOR_SPECIFIC, P2P_OUI, 4); rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, WLAN_EID_VENDOR_SPECIFIC, WFD_OUI, 4); - if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS) { + if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS) ret = 0; - } else { + else ret = -EINVAL; - } kfree(pbuf); @@ -2446,7 +2354,8 @@ static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *nd return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len); } -static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev) +static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev, + unsigned int link_id) { return 0; } diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c index f1e9e80044ed..e88fe1a998f8 100644 --- a/drivers/staging/rts5208/spi.c +++ b/drivers/staging/rts5208/spi.c @@ -460,10 +460,8 @@ int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip) spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5]; spi->write_en = srb->cmnd[6]; - dev_dbg(rtsx_dev(chip), "%s: ", __func__); - dev_dbg(rtsx_dev(chip), "spi_clock = %d, ", spi->spi_clock); - dev_dbg(rtsx_dev(chip), "clk_div = %d, ", spi->clk_div); - dev_dbg(rtsx_dev(chip), "write_en = %d\n", spi->write_en); + dev_dbg(rtsx_dev(chip), "spi_clock = %d, clk_div = %d, write_en = %d\n", + spi->spi_clock, spi->clk_div, spi->write_en); return STATUS_SUCCESS; } diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c index 029d9acec47d..e0c7ff3352bf 100644 --- a/drivers/staging/sm750fb/ddk750_dvi.c +++ b/drivers/staging/sm750fb/ddk750_dvi.c @@ -15,7 +15,7 @@ static struct dvi_ctrl_device dcft_supported_dvi_controller[] = { #ifdef DVI_CTRL_SII164 { .init = sii164InitChip, - .get_vendor_id = sii164GetVendorID, + .get_vendor_id = sii164_get_vendor_id, .get_device_id = sii164GetDeviceID, #ifdef SII164_FULL_FUNCTIONS .reset_chip = sii164ResetChip, diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h index 7002567a47d2..63c9e8b6ffb3 100644 --- a/drivers/staging/sm750fb/ddk750_power.h +++ b/drivers/staging/sm750fb/ddk750_power.h @@ -15,7 +15,7 @@ enum dpms { } void ddk750_set_dpms(enum dpms state); -void sm750_set_power_mode(unsigned int powerMode); +void sm750_set_power_mode(unsigned int mode); void sm750_set_current_gate(unsigned int gate); /* diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c index 73e0e9f41ec5..3da1796cd7aa 100644 --- a/drivers/staging/sm750fb/ddk750_sii164.c +++ b/drivers/staging/sm750fb/ddk750_sii164.c @@ -29,13 +29,13 @@ static char *gDviCtrlChipName = "Silicon 
Image SiI 164"; #endif /* - * sii164GetVendorID + * sii164_get_vendor_id * This function gets the vendor ID of the DVI controller chip. * * Output: * Vendor ID */ -unsigned short sii164GetVendorID(void) +unsigned short sii164_get_vendor_id(void) { unsigned short vendorID; @@ -140,7 +140,7 @@ long sii164InitChip(unsigned char edge_select, #endif /* Check if SII164 Chip exists */ - if ((sii164GetVendorID() == SII164_VENDOR_ID) && + if ((sii164_get_vendor_id() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) { /* * Initialize SII164 controller chip. diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h index d940cb729066..ca330f6a43e2 100644 --- a/drivers/staging/sm750fb/ddk750_sii164.h +++ b/drivers/staging/sm750fb/ddk750_sii164.h @@ -27,7 +27,7 @@ long sii164InitChip(unsigned char edgeSelect, unsigned char pllFilterEnable, unsigned char pllFilterValue); -unsigned short sii164GetVendorID(void); +unsigned short sii164_get_vendor_id(void); unsigned short sii164GetDeviceID(void); #ifdef SII164_FULL_FUNCTIONS diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index e429b33b4d39..f4c2c9506d86 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c @@ -25,12 +25,14 @@ MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio"); static void bcm2835_audio_lock(struct bcm2835_audio_instance *instance) { mutex_lock(&instance->vchi_mutex); - vchiq_use_service(instance->service_handle); + vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle); } static void bcm2835_audio_unlock(struct bcm2835_audio_instance *instance) { - vchiq_release_service(instance->service_handle); + vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle); mutex_unlock(&instance->vchi_mutex); } @@ -44,8 +46,8 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance init_completion(&instance->msg_avail_comp); } - status = vchiq_queue_kernel_message(instance->service_handle, - m, sizeof(*m)); + status = vchiq_queue_kernel_message(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle, m, sizeof(*m)); if (status) { dev_err(instance->dev, "vchi message queue failed: %d, msg=%d\n", @@ -89,11 +91,13 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance, return bcm2835_audio_send_msg(instance, &m, wait); } -static enum vchiq_status audio_vchi_callback(enum vchiq_reason reason, +static enum vchiq_status audio_vchi_callback(struct vchiq_instance *vchiq_instance, + enum vchiq_reason reason, struct vchiq_header *header, unsigned int handle, void *userdata) { - struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(handle); + struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(vchiq_instance, + handle); struct vc_audio_msg *m; if (reason != VCHIQ_MESSAGE_AVAILABLE) @@ -114,7 +118,7 @@ static enum vchiq_status audio_vchi_callback(enum vchiq_reason reason, dev_err(instance->dev, "unexpected callback type=%d\n", m->type); } - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, instance->service_handle, header); return VCHIQ_SUCCESS; } @@ -143,7 +147,8 @@ vc_vchi_audio_init(struct vchiq_instance *vchiq_instance, } /* Finished with the service for now */ - vchiq_release_service(instance->service_handle); + 
vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle); return 0; } @@ -153,10 +158,12 @@ static void vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance) int status; mutex_lock(&instance->vchi_mutex); - vchiq_use_service(instance->service_handle); + vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle); /* Close all VCHI service connections */ - status = vchiq_close_service(instance->service_handle); + status = vchiq_close_service(instance->alsa_stream->chip->vchi_ctx->instance, + instance->service_handle); if (status) { dev_err(instance->dev, "failed to close VCHI service connection (status=%d)\n", @@ -226,7 +233,7 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) goto deinit; bcm2835_audio_lock(instance); - vchiq_get_peer_version(instance->service_handle, + vchiq_get_peer_version(vchi_ctx->instance, instance->service_handle, &instance->peer_version); bcm2835_audio_unlock(instance); if (instance->peer_version < 2 || force_bulk) @@ -322,6 +329,8 @@ int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream, unsigned int size, void *src) { struct bcm2835_audio_instance *instance = alsa_stream->instance; + struct bcm2835_vchi_ctx *vchi_ctx = alsa_stream->chip->vchi_ctx; + struct vchiq_instance *vchiq_instance = vchi_ctx->instance; struct vc_audio_msg m = { .type = VC_AUDIO_MSG_TYPE_WRITE, .write.count = size, @@ -343,15 +352,14 @@ int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream, count = size; if (!instance->max_packet) { /* Send the message to the videocore */ - status = vchiq_bulk_transmit(instance->service_handle, src, - count, NULL, - VCHIQ_BULK_MODE_BLOCKING); + status = vchiq_bulk_transmit(vchiq_instance, instance->service_handle, src, count, + NULL, VCHIQ_BULK_MODE_BLOCKING); } else { while (count > 0) { int bytes = min(instance->max_packet, count); - status = vchiq_queue_kernel_message(instance->service_handle, - src, bytes); + status = vchiq_queue_kernel_message(vchiq_instance, + instance->service_handle, src, bytes); src += bytes; count -= bytes; } diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h index c93f2f3e87bb..db1441c0cc66 100644 --- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h +++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h @@ -53,9 +53,12 @@ struct vchiq_element { unsigned int size; }; +struct vchiq_instance; + struct vchiq_service_base { int fourcc; - enum vchiq_status (*callback)(enum vchiq_reason reason, + enum vchiq_status (*callback)(struct vchiq_instance *instance, + enum vchiq_reason reason, struct vchiq_header *header, unsigned int handle, void *bulk_userdata); @@ -71,7 +74,8 @@ struct vchiq_completion_data_kernel { struct vchiq_service_params_kernel { int fourcc; - enum vchiq_status (*callback)(enum vchiq_reason reason, + enum vchiq_status (*callback)(struct vchiq_instance *instance, + enum vchiq_reason reason, struct vchiq_header *header, unsigned int handle, void *bulk_userdata); @@ -88,23 +92,27 @@ extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance); extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance, const struct vchiq_service_params_kernel *params, unsigned int *pservice); -extern enum vchiq_status vchiq_close_service(unsigned int service); -extern enum vchiq_status vchiq_use_service(unsigned int service); -extern enum vchiq_status 
vchiq_release_service(unsigned int service); -extern void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header); -extern void vchiq_release_message(unsigned int service, - struct vchiq_header *header); -extern int vchiq_queue_kernel_message(unsigned int handle, void *data, - unsigned int size); -extern enum vchiq_status vchiq_bulk_transmit(unsigned int service, - const void *data, unsigned int size, void *userdata, - enum vchiq_bulk_mode mode); -extern enum vchiq_status vchiq_bulk_receive(unsigned int service, - void *data, unsigned int size, void *userdata, - enum vchiq_bulk_mode mode); -extern void *vchiq_get_service_userdata(unsigned int service); -extern enum vchiq_status vchiq_get_peer_version(unsigned int handle, - short *peer_version); -extern struct vchiq_header *vchiq_msg_hold(unsigned int handle); +extern enum vchiq_status vchiq_close_service(struct vchiq_instance *instance, + unsigned int service); +extern enum vchiq_status vchiq_use_service(struct vchiq_instance *instance, unsigned int service); +extern enum vchiq_status vchiq_release_service(struct vchiq_instance *instance, + unsigned int service); +extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header); +extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service, + struct vchiq_header *header); +extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, + void *data, unsigned int size); +extern enum vchiq_status vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service, + const void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode); +extern enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service, + void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode); +extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service); +extern enum vchiq_status vchiq_get_peer_version(struct vchiq_instance *instance, + unsigned int handle, + short *peer_version); +extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle); #endif /* VCHIQ_H */ diff --git a/drivers/staging/vc04_services/interface/TESTING b/drivers/staging/vc04_services/interface/TESTING new file mode 100644 index 000000000000..a6d63efcbcb9 --- /dev/null +++ b/drivers/staging/vc04_services/interface/TESTING @@ -0,0 +1,82 @@ +This document contains some hints to test the function of the VCHIQ driver +without having additional hardware to the Raspberry Pi. + +* Requirements & limitations + +Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoC: + - BCM2835 ( e.g. Raspberry Pi Zero W ) + - BCM2836 ( e.g. Raspberry Pi 2 ) + - BCM2837 ( e.g. Raspberry Pi 3 B+ ) + +The BCM2711 used in the Raspberry Pi 4 is currently not supported in the +mainline kernel. + +There are no specific requirements to the VideoCore firmware to get VCHIQ +working. + +The test scenarios described in this document based on the tool vchiq_test. +Its source code is available here: https://github.com/raspberrypi/userland + +* Configuration + +Here are the most common kernel configurations: + + 1. BCM2835 target SoC (ARM 32 bit) + + Just use bcm2835_defconfig which already has VCHIQ enabled. + + 2. BCM2836/7 target SoC (ARM 32 bit) + + Use the multi_v7_defconfig as a base and then enable all VCHIQ options. + + 3. 
BCM2837 target SoC (ARM 64 bit) + + Use the defconfig as a base and then enable all VCHIQ options. + +* Scenarios + + * Initial test + + Check the driver is probed and /dev/vchiq is created + + * Functional test + + Command: vchiq_test -f 10 + + Expected output: + Functional test - iters:10 + ======== iteration 1 ======== + Testing bulk transfer for alignment. + Testing bulk transfer at PAGE_SIZE. + ... + + * Ping test + + Command: vchiq_test -p 1 + + Expected output: + Ping test - service:echo, iters:1, version 3 + vchi ping (size 0) -> 57.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us + vchi bulk (size 0, 0 oneway) -> 230.000000us + vchi ping (size 0) -> 49.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us + vchi bulk (size 0, 0 oneway) -> 266.000000us + vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us + vchi bulk (size 0, 0 oneway) -> 456.000000us + vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us + vchi bulk (size 0, 0 oneway) -> 640.000000us + vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us + vchi bulk (size 0, 0 oneway) -> 2309.000000us + vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us + vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us + vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us + vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us + vchi ping (size 0, 100 async, 0 oneway) -> nanus + vchi bulk (size 0, 0 oneway) -> nanus + vchi ping (size 0, 0 async, 100 oneway) -> nanus + vchi ping (size 0, 100 async, 100 oneway) -> infus + vchi ping (size 0, 200 async, 0 oneway) -> infus + ... diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 0596ac61e286..dc33490ba7fb 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -148,12 +148,11 @@ static unsigned int g_fragments_size; static char *g_fragments_base; static char *g_free_fragments; static struct semaphore g_free_fragments_sema; -static struct device *g_dev; static DEFINE_SEMAPHORE(g_free_fragments_mutex); static enum vchiq_status -vchiq_blocking_bulk_transfer(unsigned int handle, void *data, +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data, unsigned int size, enum vchiq_bulk_dir dir); static irqreturn_t @@ -175,17 +174,17 @@ vchiq_doorbell_irq(int irq, void *dev_id) } static void -cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo) +cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo) { if (pagelistinfo->scatterlist_mapped) { - dma_unmap_sg(g_dev, pagelistinfo->scatterlist, + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, pagelistinfo->num_pages, pagelistinfo->dma_dir); } if (pagelistinfo->pages_need_release) unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages); - dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size, + dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size, pagelistinfo->pagelist, pagelistinfo->dma_addr); } @@ -212,7 +211,7 @@ is_adjacent_block(u32 *addrs, u32 addr, unsigned int k) */ static struct vchiq_pagelist_info * -create_pagelist(char *buf, char __user *ubuf, +create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf, size_t count, unsigned short type) { struct pagelist *pagelist; @@ 
-250,7 +249,7 @@ create_pagelist(char *buf, char __user *ubuf, /* Allocate enough storage to hold the page pointers and the page * list */ - pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr, + pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr, GFP_KERNEL); vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); @@ -292,7 +291,7 @@ create_pagelist(char *buf, char __user *ubuf, size_t bytes = PAGE_SIZE - off; if (!pg) { - cleanup_pagelistinfo(pagelistinfo); + cleanup_pagelistinfo(instance, pagelistinfo); return NULL; } @@ -315,7 +314,7 @@ create_pagelist(char *buf, char __user *ubuf, /* This is probably due to the process being killed */ if (actual_pages > 0) unpin_user_pages(pages, actual_pages); - cleanup_pagelistinfo(pagelistinfo); + cleanup_pagelistinfo(instance, pagelistinfo); return NULL; } /* release user pages */ @@ -338,13 +337,13 @@ create_pagelist(char *buf, char __user *ubuf, count -= len; } - dma_buffers = dma_map_sg(g_dev, + dma_buffers = dma_map_sg(instance->state->dev, scatterlist, num_pages, pagelistinfo->dma_dir); if (dma_buffers == 0) { - cleanup_pagelistinfo(pagelistinfo); + cleanup_pagelistinfo(instance, pagelistinfo); return NULL; } @@ -378,7 +377,7 @@ create_pagelist(char *buf, char __user *ubuf, char *fragments; if (down_interruptible(&g_free_fragments_sema)) { - cleanup_pagelistinfo(pagelistinfo); + cleanup_pagelistinfo(instance, pagelistinfo); return NULL; } @@ -397,7 +396,7 @@ create_pagelist(char *buf, char __user *ubuf, } static void -free_pagelist(struct vchiq_pagelist_info *pagelistinfo, +free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo, int actual) { struct pagelist *pagelist = pagelistinfo->pagelist; @@ -411,7 +410,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo, * NOTE: dma_unmap_sg must be called before the * cpu can touch any of the data/pages. */ - dma_unmap_sg(g_dev, pagelistinfo->scatterlist, + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, pagelistinfo->num_pages, pagelistinfo->dma_dir); pagelistinfo->scatterlist_mapped = 0; @@ -460,7 +459,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo, set_page_dirty(pages[i]); } - cleanup_pagelistinfo(pagelistinfo); + cleanup_pagelistinfo(instance, pagelistinfo); } int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state) @@ -519,7 +518,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state) *(char **)&g_fragments_base[i * g_fragments_size] = NULL; sema_init(&g_free_fragments_sema, MAX_FRAGMENTS); - err = vchiq_init_state(state, vchiq_slot_zero); + err = vchiq_init_state(state, vchiq_slot_zero, dev); if (err) return err; @@ -547,7 +546,6 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state) return err ? : -ENXIO; } - g_dev = dev; vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)", vchiq_slot_zero, &slot_phys); @@ -604,6 +602,10 @@ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state * void remote_event_signal(struct remote_event *event) { + /* + * Ensure that all writes to shared data structures have completed + * before signalling the peer. 
+ */ wmb(); event->fired = 1; @@ -615,12 +617,12 @@ remote_event_signal(struct remote_event *event) } int -vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, +vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset, void __user *uoffset, int size, int dir) { struct vchiq_pagelist_info *pagelistinfo; - pagelistinfo = create_pagelist(offset, uoffset, size, + pagelistinfo = create_pagelist(instance, offset, uoffset, size, (dir == VCHIQ_BULK_RECEIVE) ? PAGELIST_READ : PAGELIST_WRITE); @@ -640,10 +642,10 @@ vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, } void -vchiq_complete_bulk(struct vchiq_bulk *bulk) +vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk) { if (bulk && bulk->remote_data && bulk->actual) - free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data, + free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data, bulk->actual); } @@ -821,7 +823,7 @@ vchiq_open_service(struct vchiq_instance *instance, *phandle = service->handle; status = vchiq_open_service_internal(service, current->pid); if (status != VCHIQ_SUCCESS) { - vchiq_remove_service(service->handle); + vchiq_remove_service(instance, service->handle); *phandle = VCHIQ_SERVICE_HANDLE_INVALID; } } @@ -834,8 +836,8 @@ failed: EXPORT_SYMBOL(vchiq_open_service); enum vchiq_status -vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size, - void *userdata, enum vchiq_bulk_mode mode) +vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data, + unsigned int size, void *userdata, enum vchiq_bulk_mode mode) { enum vchiq_status status; @@ -843,13 +845,13 @@ vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size, switch (mode) { case VCHIQ_BULK_MODE_NOCALLBACK: case VCHIQ_BULK_MODE_CALLBACK: - status = vchiq_bulk_transfer(handle, + status = vchiq_bulk_transfer(instance, handle, (void *)data, NULL, size, userdata, mode, VCHIQ_BULK_TRANSMIT); break; case VCHIQ_BULK_MODE_BLOCKING: - status = vchiq_blocking_bulk_transfer(handle, (void *)data, size, + status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size, VCHIQ_BULK_TRANSMIT); break; default: @@ -871,8 +873,8 @@ vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size, } EXPORT_SYMBOL(vchiq_bulk_transmit); -enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data, - unsigned int size, void *userdata, +enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle, + void *data, unsigned int size, void *userdata, enum vchiq_bulk_mode mode) { enum vchiq_status status; @@ -881,12 +883,12 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data, switch (mode) { case VCHIQ_BULK_MODE_NOCALLBACK: case VCHIQ_BULK_MODE_CALLBACK: - status = vchiq_bulk_transfer(handle, data, NULL, + status = vchiq_bulk_transfer(instance, handle, data, NULL, size, userdata, mode, VCHIQ_BULK_RECEIVE); break; case VCHIQ_BULK_MODE_BLOCKING: - status = vchiq_blocking_bulk_transfer(handle, (void *)data, size, + status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size, VCHIQ_BULK_RECEIVE); break; default: @@ -909,20 +911,17 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data, EXPORT_SYMBOL(vchiq_bulk_receive); static enum vchiq_status -vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size, - enum vchiq_bulk_dir dir) +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned 
int handle, void *data, + unsigned int size, enum vchiq_bulk_dir dir) { - struct vchiq_instance *instance; struct vchiq_service *service; enum vchiq_status status; struct bulk_waiter_node *waiter = NULL, *iter; - service = find_service_by_handle(handle); + service = find_service_by_handle(instance, handle); if (!service) return VCHIQ_ERROR; - instance = service->instance; - vchiq_service_put(service); mutex_lock(&instance->bulk_waiter_list_mutex); @@ -959,7 +958,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size, } } - status = vchiq_bulk_transfer(handle, data, NULL, size, + status = vchiq_bulk_transfer(instance, handle, data, NULL, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING, dir); if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) { @@ -1046,8 +1045,8 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason, } enum vchiq_status -service_callback(enum vchiq_reason reason, struct vchiq_header *header, - unsigned int handle, void *bulk_userdata) +service_callback(struct vchiq_instance *instance, enum vchiq_reason reason, + struct vchiq_header *header, unsigned int handle, void *bulk_userdata) { /* * How do we ensure the callback goes to the right client? @@ -1057,7 +1056,6 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, */ struct user_service *user_service; struct vchiq_service *service; - struct vchiq_instance *instance; bool skip_completion = false; DEBUG_INITIALISE(g_state.local); @@ -1065,14 +1063,13 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, DEBUG_TRACE(SERVICE_CALLBACK_LINE); rcu_read_lock(); - service = handle_to_service(handle); + service = handle_to_service(instance, handle); if (WARN_ON(!service)) { rcu_read_unlock(); return VCHIQ_SUCCESS; } user_service = (struct user_service *)service->base.userdata; - instance = user_service->instance; if (!instance || instance->closing) { rcu_read_unlock(); @@ -1318,7 +1315,8 @@ vchiq_get_state(void) */ static enum vchiq_status -vchiq_keepalive_vchiq_callback(enum vchiq_reason reason, +vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance, + enum vchiq_reason reason, struct vchiq_header *header, unsigned int service_user, void *bulk_user) { @@ -1387,14 +1385,14 @@ vchiq_keepalive_thread_func(void *v) */ while (uc--) { atomic_inc(&arm_state->ka_use_ack_count); - status = vchiq_use_service(ka_handle); + status = vchiq_use_service(instance, ka_handle); if (status != VCHIQ_SUCCESS) { vchiq_log_error(vchiq_susp_log_level, "%s vchiq_use_service error %d", __func__, status); } } while (rc--) { - status = vchiq_release_service(ka_handle); + status = vchiq_release_service(instance, ka_handle); if (status != VCHIQ_SUCCESS) { vchiq_log_error(vchiq_susp_log_level, "%s vchiq_release_service error %d", __func__, @@ -1590,10 +1588,10 @@ vchiq_instance_set_trace(struct vchiq_instance *instance, int trace) } enum vchiq_status -vchiq_use_service(unsigned int handle) +vchiq_use_service(struct vchiq_instance *instance, unsigned int handle) { enum vchiq_status ret = VCHIQ_ERROR; - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); if (service) { ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); @@ -1604,10 +1602,10 @@ vchiq_use_service(unsigned int handle) EXPORT_SYMBOL(vchiq_use_service); enum vchiq_status -vchiq_release_service(unsigned int handle) +vchiq_release_service(struct 
vchiq_instance *instance, unsigned int handle) { enum vchiq_status ret = VCHIQ_ERROR; - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); if (service) { ret = vchiq_release_internal(service->state, service); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h index 2aa46b119a46..2851ef6b9cd0 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h @@ -86,10 +86,10 @@ extern struct vchiq_state * vchiq_get_state(void); enum vchiq_status -vchiq_use_service(unsigned int handle); +vchiq_use_service(struct vchiq_instance *instance, unsigned int handle); extern enum vchiq_status -vchiq_release_service(unsigned int handle); +vchiq_release_service(struct vchiq_instance *instance, unsigned int handle); extern enum vchiq_status vchiq_check_service(struct vchiq_service *service); @@ -138,8 +138,8 @@ static inline int vchiq_register_chrdev(struct device *parent) { return 0; } #endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */ extern enum vchiq_status -service_callback(enum vchiq_reason reason, struct vchiq_header *header, - unsigned int handle, void *bulk_userdata); +service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason, + struct vchiq_header *header, unsigned int handle, void *bulk_userdata); extern void free_bulk_waiter(struct vchiq_instance *instance); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 8f99272dbd6f..45ed30bfdbf5 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -13,6 +13,7 @@ #include <linux/rcupdate.h> #include <linux/sched/signal.h> +#include "vchiq_arm.h" #include "vchiq_core.h" #define VCHIQ_SLOT_HANDLER_STACK 8192 @@ -161,7 +162,6 @@ int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT; DEFINE_SPINLOCK(bulk_waiter_spinlock); static DEFINE_SPINLOCK(quota_spinlock); -struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES]; static unsigned int handle_seq; static const char *const srvstate_names[] = { @@ -234,13 +234,19 @@ set_service_state(struct vchiq_service *service, int newstate) service->srvstate = newstate; } +struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle) +{ + int idx = handle & (VCHIQ_MAX_SERVICES - 1); + + return rcu_dereference(instance->state->services[idx]); +} struct vchiq_service * -find_service_by_handle(unsigned int handle) +find_service_by_handle(struct vchiq_instance *instance, unsigned int handle) { struct vchiq_service *service; rcu_read_lock(); - service = handle_to_service(handle); + service = handle_to_service(instance, handle); if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && service->handle == handle && kref_get_unless_zero(&service->ref_count)) { @@ -281,7 +287,7 @@ find_service_for_instance(struct vchiq_instance *instance, unsigned int handle) struct vchiq_service *service; rcu_read_lock(); - service = handle_to_service(handle); + service = handle_to_service(instance, handle); if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && service->handle == handle && service->instance == instance && @@ -302,7 +308,7 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int h struct vchiq_service *service; rcu_read_lock(); - 
service = handle_to_service(handle); + service = handle_to_service(instance, handle); if (service && (service->srvstate == VCHIQ_SRVSTATE_FREE || service->srvstate == VCHIQ_SRVSTATE_CLOSED) && @@ -398,26 +404,26 @@ vchiq_service_put(struct vchiq_service *service) } int -vchiq_get_client_id(unsigned int handle) +vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle) { struct vchiq_service *service; int id; rcu_read_lock(); - service = handle_to_service(handle); + service = handle_to_service(instance, handle); id = service ? service->client_id : 0; rcu_read_unlock(); return id; } void * -vchiq_get_service_userdata(unsigned int handle) +vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle) { void *userdata; struct vchiq_service *service; rcu_read_lock(); - service = handle_to_service(handle); + service = handle_to_service(instance, handle); userdata = service ? service->base.userdata : NULL; rcu_read_unlock(); return userdata; @@ -466,7 +472,8 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)", service->state->id, service->localport, reason_names[reason], header, bulk_userdata); - status = service->base.callback(reason, header, service->handle, bulk_userdata); + status = service->base.callback(service->instance, reason, header, service->handle, + bulk_userdata); if (status == VCHIQ_ERROR) { vchiq_log_warning(vchiq_core_log_level, "%d: ignoring ERROR from callback to service %x", @@ -475,7 +482,7 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, } if (reason != VCHIQ_MESSAGE_AVAILABLE) - vchiq_release_message(service->handle, header); + vchiq_release_message(service->instance, service->handle, header); return status; } @@ -521,6 +528,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) return 0; } event->armed = 0; + /* Ensure that the peer sees that we are not waiting (armed == 0). */ wmb(); } @@ -643,6 +651,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service, skip_service: state->poll_needed = 1; + /* Ensure the slot handler thread sees the poll_needed flag. */ wmb(); /* ... and ensure the slot handler runs. */ @@ -1149,6 +1158,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, remote_event_wait(&state->sync_release_event, &local->sync_release); + /* Ensure that reads don't overtake the remote_event_wait. */ rmb(); header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, @@ -1441,7 +1451,7 @@ abort_outstanding_bulks(struct vchiq_service *service, } if (queue->process != queue->local_insert) { - vchiq_complete_bulk(bulk); + vchiq_complete_bulk(service->instance, bulk); vchiq_log_info(SRVTRACE_LEVEL(service), "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d", @@ -1769,7 +1779,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header) DEBUG_TRACE(PARSE_LINE); WARN_ON(queue->process == queue->local_insert); - vchiq_complete_bulk(bulk); + vchiq_complete_bulk(service->instance, bulk); queue->process++; mutex_unlock(&service->bulk_mutex); DEBUG_TRACE(PARSE_LINE); @@ -1952,6 +1962,7 @@ slot_handler_func(void *v) DEBUG_TRACE(SLOT_HANDLER_LINE); remote_event_wait(&state->trigger_event, &local->trigger); + /* Ensure that reads don't overtake the remote_event_wait. 
*/ rmb(); DEBUG_TRACE(SLOT_HANDLER_LINE); @@ -2014,6 +2025,7 @@ sync_func(void *v) remote_event_wait(&state->sync_trigger_event, &local->sync_trigger); + /* Ensure that reads don't overtake the remote_event_wait. */ rmb(); msgid = header->msgid; @@ -2142,18 +2154,13 @@ vchiq_init_slots(void *mem_base, int mem_size) } int -vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero) +vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev) { struct vchiq_shared_state *local; struct vchiq_shared_state *remote; char threadname[16]; int i, ret; - if (vchiq_states[0]) { - pr_err("%s: VCHIQ state already initialized\n", __func__); - return -EINVAL; - } - local = &slot_zero->slave; remote = &slot_zero->master; @@ -2169,6 +2176,8 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero) memset(state, 0, sizeof(struct vchiq_state)); + state->dev = dev; + /* * initialize shared state pointers */ @@ -2272,8 +2281,6 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero) wake_up_process(state->recycle_thread); wake_up_process(state->sync_thread); - vchiq_states[0] = state; - /* Indicate readiness to the other side */ local->initialised = 1; @@ -2287,9 +2294,10 @@ fail_free_handler_thread: return ret; } -void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) +void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); int pos; if (!service) @@ -2309,9 +2317,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) } EXPORT_SYMBOL(vchiq_msg_queue_push); -struct vchiq_header *vchiq_msg_hold(unsigned int handle) +struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); struct vchiq_header *header; int pos; @@ -2866,16 +2874,16 @@ vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instan /* Find all services registered to this client and remove them. */ i = 0; while ((service = next_service_by_instance(state, instance, &i)) != NULL) { - (void)vchiq_remove_service(service->handle); + (void)vchiq_remove_service(instance, service->handle); vchiq_service_put(service); } } enum vchiq_status -vchiq_close_service(unsigned int handle) +vchiq_close_service(struct vchiq_instance *instance, unsigned int handle) { /* Unregister the service */ - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); enum vchiq_status status = VCHIQ_SUCCESS; if (!service) @@ -2930,10 +2938,10 @@ vchiq_close_service(unsigned int handle) EXPORT_SYMBOL(vchiq_close_service); enum vchiq_status -vchiq_remove_service(unsigned int handle) +vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle) { /* Unregister the service */ - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); enum vchiq_status status = VCHIQ_SUCCESS; if (!service) @@ -2996,11 +3004,11 @@ vchiq_remove_service(unsigned int handle) * When called in blocking mode, the userdata field points to a bulk_waiter * structure. 
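Taken together, the hunks around here move every kernel-facing VCHIQ call from global-state lookups to an explicit struct vchiq_instance argument, including the service callback and the bulk-transfer paths. The fragment below is a minimal sketch (not part of the patch) of the resulting calling convention for a hypothetical in-kernel client; the service plumbing, buffer handling and the header assumed to declare these prototypes are illustrative, and only the vchiq_* signatures mirror the ones introduced in this series.

#include <linux/errno.h>
#include "vchiq_arm.h"	/* assumed to pull in the VCHIQ kernel API declarations */

/* Hypothetical client callback using the new instance-first signature. */
static enum vchiq_status demo_callback(struct vchiq_instance *instance,
				       enum vchiq_reason reason,
				       struct vchiq_header *header,
				       unsigned int handle, void *bulk_userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		/* Releasing a message now also needs the instance. */
		vchiq_release_message(instance, handle, header);

	return VCHIQ_SUCCESS;
}

/* Illustrative send path: short control message plus a blocking bulk transmit. */
static int demo_send(struct vchiq_instance *instance, unsigned int handle,
		     void *buf, unsigned int len)
{
	enum vchiq_status status;
	int ret;

	vchiq_use_service(instance, handle);

	ret = vchiq_queue_kernel_message(instance, handle, buf, len);
	if (ret)
		goto out;

	status = vchiq_bulk_transmit(instance, handle, buf, len, NULL,
				     VCHIQ_BULK_MODE_BLOCKING);
	ret = (status == VCHIQ_SUCCESS) ? 0 : -EIO;
out:
	vchiq_release_service(instance, handle);
	return ret;
}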
*/ -enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset, - int size, void *userdata, enum vchiq_bulk_mode mode, - enum vchiq_bulk_dir dir) +enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, + void *offset, void __user *uoffset, int size, void *userdata, + enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); struct vchiq_bulk_queue *queue; struct vchiq_bulk *bulk; struct vchiq_state *state; @@ -3075,9 +3083,13 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __ bulk->size = size; bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; - if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)) + if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir)) goto unlock_error_exit; + /* + * Ensure that the bulk data record is visible to the peer + * before proceeding. + */ wmb(); vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK", @@ -3139,7 +3151,7 @@ waiting: unlock_both_error_exit: mutex_unlock(&state->slot_mutex); cancel_bulk_error_exit: - vchiq_complete_bulk(bulk); + vchiq_complete_bulk(service->instance, bulk); unlock_error_exit: mutex_unlock(&service->bulk_mutex); @@ -3150,13 +3162,13 @@ error_exit: } enum vchiq_status -vchiq_queue_message(unsigned int handle, +vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, ssize_t (*copy_callback)(void *context, void *dest, size_t offset, size_t maxsize), void *context, size_t size) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); enum vchiq_status status = VCHIQ_ERROR; int data_id; @@ -3199,12 +3211,13 @@ error_exit: return status; } -int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size) +int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data, + unsigned int size) { enum vchiq_status status; while (1) { - status = vchiq_queue_message(handle, memcpy_copy_callback, + status = vchiq_queue_message(instance, handle, memcpy_copy_callback, data, size); /* @@ -3223,10 +3236,10 @@ int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int siz EXPORT_SYMBOL(vchiq_queue_kernel_message); void -vchiq_release_message(unsigned int handle, +vchiq_release_message(struct vchiq_instance *instance, unsigned int handle, struct vchiq_header *header) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); struct vchiq_shared_state *remote; struct vchiq_state *state; int slot_index; @@ -3265,10 +3278,10 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header) } enum vchiq_status -vchiq_get_peer_version(unsigned int handle, short *peer_version) +vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version) { enum vchiq_status status = VCHIQ_ERROR; - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); if (!service) goto exit; @@ -3300,9 +3313,10 @@ void vchiq_get_config(struct vchiq_config *config) } int -vchiq_set_service_option(unsigned int handle, enum vchiq_service_option option, int value) +vchiq_set_service_option(struct vchiq_instance *instance, unsigned 
int handle, + enum vchiq_service_option option, int value) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service = find_service_by_handle(instance, handle); struct vchiq_service_quota *quota; int ret = -EINVAL; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h index 1ddc661642a9..8b4a38f5b3f2 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h @@ -314,6 +314,7 @@ struct vchiq_slot_zero { }; struct vchiq_state { + struct device *dev; int id; int initialised; enum vchiq_connstate conn_state; @@ -448,8 +449,6 @@ extern int vchiq_core_log_level; extern int vchiq_core_msg_log_level; extern int vchiq_sync_log_level; -extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES]; - extern const char * get_conn_state_name(enum vchiq_connstate conn_state); @@ -457,7 +456,7 @@ extern struct vchiq_slot_zero * vchiq_init_slots(void *mem_base, int mem_size); extern int -vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero); +vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev); extern enum vchiq_status vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance); @@ -487,8 +486,8 @@ extern void remote_event_pollall(struct vchiq_state *state); extern enum vchiq_status -vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset, - int size, void *userdata, enum vchiq_bulk_mode mode, +vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset, + void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir); extern int @@ -507,20 +506,10 @@ extern void request_poll(struct vchiq_state *state, struct vchiq_service *service, int poll_type); -static inline struct vchiq_service * -handle_to_service(unsigned int handle) -{ - int idx = handle & (VCHIQ_MAX_SERVICES - 1); - struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) & - (VCHIQ_MAX_STATES - 1)]; - - if (!state) - return NULL; - return rcu_dereference(state->services[idx]); -} +struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle); extern struct vchiq_service * -find_service_by_handle(unsigned int handle); +find_service_by_handle(struct vchiq_instance *instance, unsigned int handle); extern struct vchiq_service * find_service_by_port(struct vchiq_state *state, unsigned int localport); @@ -548,16 +537,16 @@ extern void vchiq_service_put(struct vchiq_service *service); extern enum vchiq_status -vchiq_queue_message(unsigned int handle, +vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, ssize_t (*copy_callback)(void *context, void *dest, size_t offset, size_t maxsize), void *context, size_t size); -int vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, void __user *uoffset, - int size, int dir); +int vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset, + void __user *uoffset, int size, int dir); -void vchiq_complete_bulk(struct vchiq_bulk *bulk); +void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk); void remote_event_signal(struct remote_event *event); @@ -595,12 +584,13 @@ void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newsta void vchiq_log_dump_mem(const char *label, u32 
addr, const void *void_mem, size_t num_bytes); -enum vchiq_status vchiq_remove_service(unsigned int service); +enum vchiq_status vchiq_remove_service(struct vchiq_instance *instance, unsigned int service); -int vchiq_get_client_id(unsigned int service); +int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service); void vchiq_get_config(struct vchiq_config *config); -int vchiq_set_service_option(unsigned int service, enum vchiq_service_option option, int value); +int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service, + enum vchiq_service_option option, int value); #endif diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c index 66bbfec332ba..7e297494437e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c @@ -108,8 +108,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest, } static int -vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements, - unsigned long count) +vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_element *elements, unsigned long count) { struct vchiq_io_copy_callback_context context; enum vchiq_status status = VCHIQ_SUCCESS; @@ -127,7 +127,7 @@ vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements, total_size += elements[i].size; } - status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data, + status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data, &context, total_size); if (status == VCHIQ_ERROR) @@ -191,7 +191,7 @@ static int vchiq_ioc_create_service(struct vchiq_instance *instance, if (args->is_open) { status = vchiq_open_service_internal(service, instance->pid); if (status != VCHIQ_SUCCESS) { - vchiq_remove_service(service->handle); + vchiq_remove_service(instance, service->handle); return (status == VCHIQ_RETRY) ? -EINTR : -EIO; } @@ -266,7 +266,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance, /* Copy to user space if msgbuf is not NULL */ if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) { ret = header->size; - vchiq_release_message(service->handle, header); + vchiq_release_message(instance, service->handle, header); } else { ret = -EFAULT; } @@ -330,7 +330,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance, userdata = args->userdata; } - status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size, + status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size, userdata, args->mode, dir); if (!waiter) { @@ -529,7 +529,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance, } /* Now it has been copied, the message can be released. */ - vchiq_release_message(service->handle, header); + vchiq_release_message(instance, service->handle, header); /* The completion must point to the msgbuf. 
*/ user_completion.header = msgbuf; @@ -596,7 +596,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) i = 0; while ((service = next_service_by_instance(instance->state, instance, &i))) { - status = vchiq_remove_service(service->handle); + status = vchiq_remove_service(instance, service->handle); vchiq_service_put(service); if (status != VCHIQ_SUCCESS) break; @@ -649,7 +649,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; if (put_user(args.handle, &argp->handle)) { - vchiq_remove_service(args.handle); + vchiq_remove_service(instance, args.handle); ret = -EFAULT; } } break; @@ -673,8 +673,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) */ if (!user_service->close_pending) { status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ? - vchiq_close_service(service->handle) : - vchiq_remove_service(service->handle); + vchiq_close_service(instance, service->handle) : + vchiq_remove_service(instance, service->handle); if (status != VCHIQ_SUCCESS) break; } @@ -731,7 +731,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (copy_from_user(elements, args.elements, args.count * sizeof(struct vchiq_element)) == 0) - ret = vchiq_ioc_queue_message(args.handle, elements, + ret = vchiq_ioc_queue_message(instance, args.handle, elements, args.count); else ret = -EFAULT; @@ -788,7 +788,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case VCHIQ_IOC_GET_CLIENT_ID: { unsigned int handle = (unsigned int)arg; - ret = vchiq_get_client_id(handle); + ret = vchiq_get_client_id(instance, handle); } break; case VCHIQ_IOC_GET_CONFIG: { @@ -827,7 +827,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } - ret = vchiq_set_service_option(args.handle, args.option, + ret = vchiq_set_service_option(instance, args.handle, args.option, args.value); } break; @@ -908,6 +908,7 @@ vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd, { struct vchiq_create_service args; struct vchiq_create_service32 args32; + struct vchiq_instance *instance = file->private_data; long ret; if (copy_from_user(&args32, ptrargs32, sizeof(args32))) @@ -926,12 +927,12 @@ vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd, .handle = args32.handle, }; - ret = vchiq_ioc_create_service(file->private_data, &args); + ret = vchiq_ioc_create_service(instance, &args); if (ret < 0) return ret; if (put_user(args.handle, &ptrargs32->handle)) { - vchiq_remove_service(args.handle); + vchiq_remove_service(instance, args.handle); return -EFAULT; } @@ -960,6 +961,7 @@ vchiq_compat_ioctl_queue_message(struct file *file, struct vchiq_queue_message args; struct vchiq_queue_message32 args32; struct vchiq_service *service; + struct vchiq_instance *instance = file->private_data; int ret; if (copy_from_user(&args32, arg, sizeof(args32))) @@ -974,7 +976,7 @@ vchiq_compat_ioctl_queue_message(struct file *file, if (args32.count > MAX_ELEMENTS) return -EINVAL; - service = find_service_for_instance(file->private_data, args.handle); + service = find_service_for_instance(instance, args.handle); if (!service) return -EINVAL; @@ -994,7 +996,7 @@ vchiq_compat_ioctl_queue_message(struct file *file, compat_ptr(element32[count].data); elements[count].size = element32[count].size; } - ret = vchiq_ioc_queue_message(args.handle, elements, + ret = vchiq_ioc_queue_message(instance, args.handle, elements, args.count); } else { ret = -EINVAL; @@ -1261,7 +1263,7 @@ static int vchiq_release(struct inode *inode, struct file 
*file) spin_unlock(&msg_queue_spinlock); if (header) - vchiq_release_message(service->handle, header); + vchiq_release_message(instance, service->handle, header); spin_lock(&msg_queue_spinlock); } diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c index 845b20e4d05a..cb921c94996a 100644 --- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c +++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c @@ -292,8 +292,8 @@ static void buffer_to_host_work_cb(struct work_struct *work) /* Dummy receive to ensure the buffers remain in order */ len = 8; /* queue the bulk submission */ - vchiq_use_service(instance->service_handle); - ret = vchiq_bulk_receive(instance->service_handle, + vchiq_use_service(instance->vchiq_instance, instance->service_handle); + ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle, msg_context->u.bulk.buffer->buffer, /* Actual receive needs to be a multiple * of 4 bytes @@ -302,7 +302,7 @@ static void buffer_to_host_work_cb(struct work_struct *work) msg_context, VCHIQ_BULK_MODE_CALLBACK); - vchiq_release_service(instance->service_handle); + vchiq_release_service(instance->vchiq_instance, instance->service_handle); if (ret != 0) pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n", @@ -436,15 +436,15 @@ buffer_from_host(struct vchiq_mmal_instance *instance, /* no payload in message */ m.u.buffer_from_host.payload_in_message = 0; - vchiq_use_service(instance->service_handle); + vchiq_use_service(instance->vchiq_instance, instance->service_handle); - ret = vchiq_queue_kernel_message(instance->service_handle, &m, + ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m, sizeof(struct mmal_msg_header) + sizeof(m.u.buffer_from_host)); if (ret) atomic_dec(&port->buffers_with_vpu); - vchiq_release_service(instance->service_handle); + vchiq_release_service(instance->vchiq_instance, instance->service_handle); return ret; } @@ -548,11 +548,12 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance, } /* incoming event service callback */ -static enum vchiq_status service_callback(enum vchiq_reason reason, +static enum vchiq_status service_callback(struct vchiq_instance *vchiq_instance, + enum vchiq_reason reason, struct vchiq_header *header, unsigned int handle, void *bulk_ctx) { - struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle); + struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle); u32 msg_len; struct mmal_msg *msg; struct mmal_msg_context *msg_context; @@ -572,25 +573,25 @@ static enum vchiq_status service_callback(enum vchiq_reason reason, /* handling is different for buffer messages */ switch (msg->h.type) { case MMAL_MSG_TYPE_BUFFER_FROM_HOST: - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, handle, header); break; case MMAL_MSG_TYPE_EVENT_TO_HOST: event_to_host_cb(instance, msg, msg_len); - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, handle, header); break; case MMAL_MSG_TYPE_BUFFER_TO_HOST: buffer_to_host_cb(instance, msg, msg_len); - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, handle, header); break; default: /* messages dependent on header context to complete */ if (!msg->h.context) { pr_err("received message context was null!\n"); - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, handle, header); break; } @@ -599,7 +600,7 @@ static enum 
vchiq_status service_callback(enum vchiq_reason reason, if (!msg_context) { pr_err("received invalid message context %u!\n", msg->h.context); - vchiq_release_message(handle, header); + vchiq_release_message(vchiq_instance, handle, header); break; } @@ -678,13 +679,13 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len), ">>> sync message"); - vchiq_use_service(instance->service_handle); + vchiq_use_service(instance->vchiq_instance, instance->service_handle); - ret = vchiq_queue_kernel_message(instance->service_handle, msg, + ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg, sizeof(struct mmal_msg_header) + payload_len); - vchiq_release_service(instance->service_handle); + vchiq_release_service(instance->vchiq_instance, instance->service_handle); if (ret) { pr_err("error %d queuing message\n", ret); @@ -824,7 +825,7 @@ static int port_info_set(struct vchiq_mmal_instance *instance, port->component->handle, port->handle); release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -919,7 +920,7 @@ release_msg: pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret, port->component->handle, port->handle); - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -967,7 +968,7 @@ static int create_component(struct vchiq_mmal_instance *instance, component->inputs, component->outputs, component->clocks); release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1000,7 +1001,7 @@ static int destroy_component(struct vchiq_mmal_instance *instance, release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1032,7 +1033,7 @@ static int enable_component(struct vchiq_mmal_instance *instance, ret = -rmsg->u.component_enable_reply.status; release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1065,7 +1066,7 @@ static int disable_component(struct vchiq_mmal_instance *instance, release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1097,7 +1098,7 @@ static int get_version(struct vchiq_mmal_instance *instance, *minor_out = rmsg->u.version.minor; release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1139,7 +1140,7 @@ static int port_action_port(struct vchiq_mmal_instance *instance, port_action_type_names[action_type], action_type); release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1187,7 +1188,7 @@ static int port_action_handle(struct vchiq_mmal_instance *instance, action_type, connect_component_handle, connect_port_handle); release_msg: - 
vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1228,7 +1229,7 @@ static int port_parameter_set(struct vchiq_mmal_instance *instance, ret, port->component->handle, port->handle, parameter_id); release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1287,7 +1288,7 @@ static int port_parameter_get(struct vchiq_mmal_instance *instance, ret, port->component->handle, port->handle, parameter_id); release_msg: - vchiq_release_message(instance->service_handle, rmsg_handle); + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); return ret; } @@ -1832,9 +1833,9 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance) if (mutex_lock_interruptible(&instance->vchiq_mutex)) return -EINTR; - vchiq_use_service(instance->service_handle); + vchiq_use_service(instance->vchiq_instance, instance->service_handle); - status = vchiq_close_service(instance->service_handle); + status = vchiq_close_service(instance->vchiq_instance, instance->service_handle); if (status != 0) pr_err("mmal-vchiq: VCHIQ close failed\n"); @@ -1922,14 +1923,14 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance) goto err_close_services; } - vchiq_release_service(instance->service_handle); + vchiq_release_service(instance->vchiq_instance, instance->service_handle); *out_instance = instance; return 0; err_close_services: - vchiq_close_service(instance->service_handle); + vchiq_close_service(instance->vchiq_instance, instance->service_handle); destroy_workqueue(instance->bulk_wq); err_free: kfree(instance); diff --git a/drivers/staging/vme_user/Kconfig b/drivers/staging/vme_user/Kconfig index e8b4461bf27f..c8eabf8f40f1 100644 --- a/drivers/staging/vme_user/Kconfig +++ b/drivers/staging/vme_user/Kconfig @@ -1,4 +1,29 @@ # SPDX-License-Identifier: GPL-2.0 +menuconfig VME_BUS + bool "VME bridge support" + depends on STAGING && PCI + help + If you say Y here you get support for the VME bridge Framework. + +if VME_BUS + +comment "VME Bridge Drivers" + +config VME_TSI148 + tristate "Tempe" + depends on HAS_DMA + help + If you say Y here you get support for the Tundra TSI148 VME bridge + chip. + +config VME_FAKE + tristate "Fake" + help + If you say Y here you get support for the fake VME bridge. This + provides a virtualised VME Bus for devices with no VME bridge. This + is mainly useful for VME development (in the absence of VME + hardware). + comment "VME Device Drivers" config VME_USER @@ -11,3 +36,5 @@ config VME_USER To compile this driver as a module, choose M here. The module will be called vme_user. If unsure, say N. + +endif diff --git a/drivers/staging/vme_user/Makefile b/drivers/staging/vme_user/Makefile index 5380115139b0..8dcc6938ce5c 100644 --- a/drivers/staging/vme_user/Makefile +++ b/drivers/staging/vme_user/Makefile @@ -3,4 +3,7 @@ # Makefile for the VME device drivers. 
# +obj-$(CONFIG_VME_BUS) += vme.o obj-$(CONFIG_VME_USER) += vme_user.o +obj-$(CONFIG_VME_TSI148) += vme_tsi148.o +obj-$(CONFIG_VME_FAKE) += vme_fake.o diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c new file mode 100644 index 000000000000..b5555683a069 --- /dev/null +++ b/drivers/staging/vme_user/vme.c @@ -0,0 +1,2015 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * VME Bridge Framework + * + * Author: Martyn Welch <martyn.welch@ge.com> + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. + * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. + */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/pagemap.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/syscalls.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/slab.h> + +#include "vme.h" +#include "vme_bridge.h" + +/* Bitmask and list of registered buses both protected by common mutex */ +static unsigned int vme_bus_numbers; +static LIST_HEAD(vme_bus_list); +static DEFINE_MUTEX(vme_buses_lock); + +static int __init vme_init(void); + +static struct vme_dev *dev_to_vme_dev(struct device *dev) +{ + return container_of(dev, struct vme_dev, dev); +} + +/* + * Find the bridge that the resource is associated with. + */ +static struct vme_bridge *find_bridge(struct vme_resource *resource) +{ + /* Get list to search */ + switch (resource->type) { + case VME_MASTER: + return list_entry(resource->entry, struct vme_master_resource, + list)->parent; + case VME_SLAVE: + return list_entry(resource->entry, struct vme_slave_resource, + list)->parent; + case VME_DMA: + return list_entry(resource->entry, struct vme_dma_resource, + list)->parent; + case VME_LM: + return list_entry(resource->entry, struct vme_lm_resource, + list)->parent; + default: + printk(KERN_ERR "Unknown resource type\n"); + return NULL; + } +} + +/** + * vme_alloc_consistent - Allocate contiguous memory. + * @resource: Pointer to VME resource. + * @size: Size of allocation required. + * @dma: Pointer to variable to store physical address of allocation. + * + * Allocate a contiguous block of memory for use by the driver. This is used to + * create the buffers for the slave windows. + * + * Return: Virtual address of allocation on success, NULL on failure. + */ +void *vme_alloc_consistent(struct vme_resource *resource, size_t size, + dma_addr_t *dma) +{ + struct vme_bridge *bridge; + + if (!resource) { + printk(KERN_ERR "No resource\n"); + return NULL; + } + + bridge = find_bridge(resource); + if (!bridge) { + printk(KERN_ERR "Can't find bridge\n"); + return NULL; + } + + if (!bridge->parent) { + printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name); + return NULL; + } + + if (!bridge->alloc_consistent) { + printk(KERN_ERR "alloc_consistent not supported by bridge %s\n", + bridge->name); + return NULL; + } + + return bridge->alloc_consistent(bridge->parent, size, dma); +} +EXPORT_SYMBOL(vme_alloc_consistent); + +/** + * vme_free_consistent - Free previously allocated memory. + * @resource: Pointer to VME resource. + * @size: Size of allocation to free. + * @vaddr: Virtual address of allocation. + * @dma: Physical address of allocation. + * + * Free previously allocated block of contiguous memory. 
+ */ +void vme_free_consistent(struct vme_resource *resource, size_t size, + void *vaddr, dma_addr_t dma) +{ + struct vme_bridge *bridge; + + if (!resource) { + printk(KERN_ERR "No resource\n"); + return; + } + + bridge = find_bridge(resource); + if (!bridge) { + printk(KERN_ERR "Can't find bridge\n"); + return; + } + + if (!bridge->parent) { + printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name); + return; + } + + if (!bridge->free_consistent) { + printk(KERN_ERR "free_consistent not supported by bridge %s\n", + bridge->name); + return; + } + + bridge->free_consistent(bridge->parent, size, vaddr, dma); +} +EXPORT_SYMBOL(vme_free_consistent); + +/** + * vme_get_size - Helper function returning size of a VME window + * @resource: Pointer to VME slave or master resource. + * + * Determine the size of the VME window provided. This is a helper + * function, wrappering the call to vme_master_get or vme_slave_get + * depending on the type of window resource handed to it. + * + * Return: Size of the window on success, zero on failure. + */ +size_t vme_get_size(struct vme_resource *resource) +{ + int enabled, retval; + unsigned long long base, size; + dma_addr_t buf_base; + u32 aspace, cycle, dwidth; + + switch (resource->type) { + case VME_MASTER: + retval = vme_master_get(resource, &enabled, &base, &size, + &aspace, &cycle, &dwidth); + if (retval) + return 0; + + return size; + case VME_SLAVE: + retval = vme_slave_get(resource, &enabled, &base, &size, + &buf_base, &aspace, &cycle); + if (retval) + return 0; + + return size; + case VME_DMA: + return 0; + default: + printk(KERN_ERR "Unknown resource type\n"); + return 0; + } +} +EXPORT_SYMBOL(vme_get_size); + +int vme_check_window(u32 aspace, unsigned long long vme_base, + unsigned long long size) +{ + int retval = 0; + + if (vme_base + size < size) + return -EINVAL; + + switch (aspace) { + case VME_A16: + if (vme_base + size > VME_A16_MAX) + retval = -EFAULT; + break; + case VME_A24: + if (vme_base + size > VME_A24_MAX) + retval = -EFAULT; + break; + case VME_A32: + if (vme_base + size > VME_A32_MAX) + retval = -EFAULT; + break; + case VME_A64: + /* The VME_A64_MAX limit is actually U64_MAX + 1 */ + break; + case VME_CRCSR: + if (vme_base + size > VME_CRCSR_MAX) + retval = -EFAULT; + break; + case VME_USER1: + case VME_USER2: + case VME_USER3: + case VME_USER4: + /* User Defined */ + break; + default: + printk(KERN_ERR "Invalid address space\n"); + retval = -EINVAL; + break; + } + + return retval; +} +EXPORT_SYMBOL(vme_check_window); + +static u32 vme_get_aspace(int am) +{ + switch (am) { + case 0x29: + case 0x2D: + return VME_A16; + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + return VME_A24; + case 0x8: + case 0x9: + case 0xA: + case 0xB: + case 0xC: + case 0xD: + case 0xE: + case 0xF: + return VME_A32; + case 0x0: + case 0x1: + case 0x3: + return VME_A64; + } + + return 0; +} + +/** + * vme_slave_request - Request a VME slave window resource. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @address: Required VME address space. + * @cycle: Required VME data transfer cycle type. + * + * Request use of a VME window resource capable of being set for the requested + * address space and data transfer cycle. + * + * Return: Pointer to VME resource on success, NULL on failure. 
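vme_slave_request() is meant to be paired with vme_alloc_consistent()/vme_free_consistent() above and vme_slave_set()/vme_slave_free() defined just below. The following sketch shows that sequence for a hypothetical driver probe path; the VME_SCT/VME_USER/VME_DATA cycle flags are assumed from vme.h (only VME_A24 appears in vme_check_window() here), and the window base and size are arbitrary example values.

/* Illustrative only: set up a 64 KiB A24 slave window backed by a
 * coherent buffer.  Flag names beyond VME_A24 are assumed from vme.h.
 */
static int demo_setup_slave(struct vme_dev *vdev)
{
	struct vme_resource *res;
	dma_addr_t buf_dma;
	void *buf;
	int ret;

	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (!res)
		return -ENODEV;

	buf = vme_alloc_consistent(res, 0x10000, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_res;
	}

	/* Respond to A24 accesses in the 64 KiB window at 0xC00000. */
	ret = vme_slave_set(res, 1, 0xC00000, 0x10000, buf_dma,
			    VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (ret)
		goto err_free_buf;

	return 0;

err_free_buf:
	vme_free_consistent(res, 0x10000, buf, buf_dma);
err_free_res:
	vme_slave_free(res);
	return ret;
}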
+ */ +struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address, + u32 cycle) +{ + struct vme_bridge *bridge; + struct list_head *slave_pos = NULL; + struct vme_slave_resource *allocated_image = NULL; + struct vme_slave_resource *slave_image = NULL; + struct vme_resource *resource = NULL; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through slave resources */ + list_for_each(slave_pos, &bridge->slave_resources) { + slave_image = list_entry(slave_pos, + struct vme_slave_resource, list); + + if (!slave_image) { + printk(KERN_ERR "Registered NULL Slave resource\n"); + continue; + } + + /* Find an unlocked and compatible image */ + mutex_lock(&slave_image->mtx); + if (((slave_image->address_attr & address) == address) && + ((slave_image->cycle_attr & cycle) == cycle) && + (slave_image->locked == 0)) { + + slave_image->locked = 1; + mutex_unlock(&slave_image->mtx); + allocated_image = slave_image; + break; + } + mutex_unlock(&slave_image->mtx); + } + + /* No free image */ + if (!allocated_image) + goto err_image; + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_SLAVE; + resource->entry = &allocated_image->list; + + return resource; + +err_alloc: + /* Unlock image */ + mutex_lock(&slave_image->mtx); + slave_image->locked = 0; + mutex_unlock(&slave_image->mtx); +err_image: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_slave_request); + +/** + * vme_slave_set - Set VME slave window configuration. + * @resource: Pointer to VME slave resource. + * @enabled: State to which the window should be configured. + * @vme_base: Base address for the window. + * @size: Size of the VME window. + * @buf_base: Based address of buffer used to provide VME slave window storage. + * @aspace: VME address space for the VME window. + * @cycle: VME data transfer cycle type for the VME window. + * + * Set configuration for provided VME slave window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device, if an invalid resource has been provided or invalid + * attributes are provided. Hardware specific errors may also be + * returned. + */ +int vme_slave_set(struct vme_resource *resource, int enabled, + unsigned long long vme_base, unsigned long long size, + dma_addr_t buf_base, u32 aspace, u32 cycle) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_slave_resource *image; + int retval; + + if (resource->type != VME_SLAVE) { + printk(KERN_ERR "Not a slave resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_slave_resource, list); + + if (!bridge->slave_set) { + printk(KERN_ERR "Function not supported\n"); + return -ENOSYS; + } + + if (!(((image->address_attr & aspace) == aspace) && + ((image->cycle_attr & cycle) == cycle))) { + printk(KERN_ERR "Invalid attributes\n"); + return -EINVAL; + } + + retval = vme_check_window(aspace, vme_base, size); + if (retval) + return retval; + + return bridge->slave_set(image, enabled, vme_base, size, buf_base, + aspace, cycle); +} +EXPORT_SYMBOL(vme_slave_set); + +/** + * vme_slave_get - Retrieve VME slave window configuration. + * @resource: Pointer to VME slave resource. + * @enabled: Pointer to variable for storing state. + * @vme_base: Pointer to variable for storing window base address. + * @size: Pointer to variable for storing window size. + * @buf_base: Pointer to variable for storing slave buffer base address. 
+ * @aspace: Pointer to variable for storing VME address space. + * @cycle: Pointer to variable for storing VME data transfer cycle type. + * + * Return configuration for provided VME slave window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device or if an invalid resource has been provided. + */ +int vme_slave_get(struct vme_resource *resource, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *buf_base, u32 *aspace, u32 *cycle) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_slave_resource *image; + + if (resource->type != VME_SLAVE) { + printk(KERN_ERR "Not a slave resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_slave_resource, list); + + if (!bridge->slave_get) { + printk(KERN_ERR "vme_slave_get not supported\n"); + return -EINVAL; + } + + return bridge->slave_get(image, enabled, vme_base, size, buf_base, + aspace, cycle); +} +EXPORT_SYMBOL(vme_slave_get); + +/** + * vme_slave_free - Free VME slave window + * @resource: Pointer to VME slave resource. + * + * Free the provided slave resource so that it may be reallocated. + */ +void vme_slave_free(struct vme_resource *resource) +{ + struct vme_slave_resource *slave_image; + + if (resource->type != VME_SLAVE) { + printk(KERN_ERR "Not a slave resource\n"); + return; + } + + slave_image = list_entry(resource->entry, struct vme_slave_resource, + list); + if (!slave_image) { + printk(KERN_ERR "Can't find slave resource\n"); + return; + } + + /* Unlock image */ + mutex_lock(&slave_image->mtx); + if (slave_image->locked == 0) + printk(KERN_ERR "Image is already free\n"); + + slave_image->locked = 0; + mutex_unlock(&slave_image->mtx); + + /* Free up resource memory */ + kfree(resource); +} +EXPORT_SYMBOL(vme_slave_free); + +/** + * vme_master_request - Request a VME master window resource. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @address: Required VME address space. + * @cycle: Required VME data transfer cycle type. + * @dwidth: Required VME data transfer width. + * + * Request use of a VME window resource capable of being set for the requested + * address space, data transfer cycle and width. + * + * Return: Pointer to VME resource on success, NULL on failure. 
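The master-window half of the API follows the same request/configure/release pattern, with vme_master_read() and vme_master_write() (defined below) moving data through the window. A hedged sketch of that flow; VME_A32 appears in vme_check_window() here, while VME_D32, the cycle flags and vme_master_free() are assumed from vme.h and the rest of this file.

/* Illustrative only: map a 1 MiB A32/D32 master window and read 256
 * bytes from its start.  Flag names and vme_master_free() are assumed.
 */
static ssize_t demo_read_master(struct vme_dev *vdev, void *buf)
{
	struct vme_resource *res;
	ssize_t nread;
	int ret;

	res = vme_master_request(vdev, VME_A32,
				 VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (!res)
		return -ENODEV;

	/* Window onto VME A32 space at 0x20000000, 1 MiB long. */
	ret = vme_master_set(res, 1, 0x20000000, 0x100000,
			     VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (ret) {
		vme_master_free(res);
		return ret;
	}

	nread = vme_master_read(res, buf, 256, 0);

	/* Disable the window again before releasing the resource. */
	vme_master_set(res, 0, 0x20000000, 0x100000,
		       VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
	vme_master_free(res);
	return nread;
}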
+ */ +struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address, + u32 cycle, u32 dwidth) +{ + struct vme_bridge *bridge; + struct list_head *master_pos = NULL; + struct vme_master_resource *allocated_image = NULL; + struct vme_master_resource *master_image = NULL; + struct vme_resource *resource = NULL; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through master resources */ + list_for_each(master_pos, &bridge->master_resources) { + master_image = list_entry(master_pos, + struct vme_master_resource, list); + + if (!master_image) { + printk(KERN_WARNING "Registered NULL master resource\n"); + continue; + } + + /* Find an unlocked and compatible image */ + spin_lock(&master_image->lock); + if (((master_image->address_attr & address) == address) && + ((master_image->cycle_attr & cycle) == cycle) && + ((master_image->width_attr & dwidth) == dwidth) && + (master_image->locked == 0)) { + + master_image->locked = 1; + spin_unlock(&master_image->lock); + allocated_image = master_image; + break; + } + spin_unlock(&master_image->lock); + } + + /* Check to see if we found a resource */ + if (!allocated_image) { + printk(KERN_ERR "Can't find a suitable resource\n"); + goto err_image; + } + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_MASTER; + resource->entry = &allocated_image->list; + + return resource; + +err_alloc: + /* Unlock image */ + spin_lock(&master_image->lock); + master_image->locked = 0; + spin_unlock(&master_image->lock); +err_image: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_master_request); + +/** + * vme_master_set - Set VME master window configuration. + * @resource: Pointer to VME master resource. + * @enabled: State to which the window should be configured. + * @vme_base: Base address for the window. + * @size: Size of the VME window. + * @aspace: VME address space for the VME window. + * @cycle: VME data transfer cycle type for the VME window. + * @dwidth: VME data transfer width for the VME window. + * + * Set configuration for provided VME master window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device, if an invalid resource has been provided or invalid + * attributes are provided. Hardware specific errors may also be + * returned. + */ +int vme_master_set(struct vme_resource *resource, int enabled, + unsigned long long vme_base, unsigned long long size, u32 aspace, + u32 cycle, u32 dwidth) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + int retval; + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + if (!bridge->master_set) { + printk(KERN_WARNING "vme_master_set not supported\n"); + return -EINVAL; + } + + if (!(((image->address_attr & aspace) == aspace) && + ((image->cycle_attr & cycle) == cycle) && + ((image->width_attr & dwidth) == dwidth))) { + printk(KERN_WARNING "Invalid attributes\n"); + return -EINVAL; + } + + retval = vme_check_window(aspace, vme_base, size); + if (retval) + return retval; + + return bridge->master_set(image, enabled, vme_base, size, aspace, + cycle, dwidth); +} +EXPORT_SYMBOL(vme_master_set); + +/** + * vme_master_get - Retrieve VME master window configuration. + * @resource: Pointer to VME master resource. + * @enabled: Pointer to variable for storing state. 
+ * @vme_base: Pointer to variable for storing window base address. + * @size: Pointer to variable for storing window size. + * @aspace: Pointer to variable for storing VME address space. + * @cycle: Pointer to variable for storing VME data transfer cycle type. + * @dwidth: Pointer to variable for storing VME data transfer width. + * + * Return configuration for provided VME master window. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device or if an invalid resource has been provided. + */ +int vme_master_get(struct vme_resource *resource, int *enabled, + unsigned long long *vme_base, unsigned long long *size, u32 *aspace, + u32 *cycle, u32 *dwidth) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + if (!bridge->master_get) { + printk(KERN_WARNING "%s not supported\n", __func__); + return -EINVAL; + } + + return bridge->master_get(image, enabled, vme_base, size, aspace, + cycle, dwidth); +} +EXPORT_SYMBOL(vme_master_get); + +/** + * vme_master_read - Read data from VME space into a buffer. + * @resource: Pointer to VME master resource. + * @buf: Pointer to buffer where data should be transferred. + * @count: Number of bytes to transfer. + * @offset: Offset into VME master window at which to start transfer. + * + * Perform read of count bytes of data from location on VME bus which maps into + * the VME master window at offset to buf. + * + * Return: Number of bytes read, -EINVAL if resource is not a VME master + * resource or read operation is not supported. -EFAULT returned if + * invalid offset is provided. Hardware specific errors may also be + * returned. + */ +ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count, + loff_t offset) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + size_t length; + + if (!bridge->master_read) { + printk(KERN_WARNING "Reading from resource not supported\n"); + return -EINVAL; + } + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + length = vme_get_size(resource); + + if (offset > length) { + printk(KERN_WARNING "Invalid Offset\n"); + return -EFAULT; + } + + if ((offset + count) > length) + count = length - offset; + + return bridge->master_read(image, buf, count, offset); + +} +EXPORT_SYMBOL(vme_master_read); + +/** + * vme_master_write - Write data out to VME space from a buffer. + * @resource: Pointer to VME master resource. + * @buf: Pointer to buffer holding data to transfer. + * @count: Number of bytes to transfer. + * @offset: Offset into VME master window at which to start transfer. + * + * Perform write of count bytes of data from buf to location on VME bus which + * maps into the VME master window at offset. + * + * Return: Number of bytes written, -EINVAL if resource is not a VME master + * resource or write operation is not supported. -EFAULT returned if + * invalid offset is provided. Hardware specific errors may also be + * returned. 
+ */ +ssize_t vme_master_write(struct vme_resource *resource, void *buf, + size_t count, loff_t offset) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + size_t length; + + if (!bridge->master_write) { + printk(KERN_WARNING "Writing to resource not supported\n"); + return -EINVAL; + } + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + length = vme_get_size(resource); + + if (offset > length) { + printk(KERN_WARNING "Invalid Offset\n"); + return -EFAULT; + } + + if ((offset + count) > length) + count = length - offset; + + return bridge->master_write(image, buf, count, offset); +} +EXPORT_SYMBOL(vme_master_write); + +/** + * vme_master_rmw - Perform read-modify-write cycle. + * @resource: Pointer to VME master resource. + * @mask: Bits to be compared and swapped in operation. + * @compare: Bits to be compared with data read from offset. + * @swap: Bits to be swapped in data read from offset. + * @offset: Offset into VME master window at which to perform operation. + * + * Perform read-modify-write cycle on provided location: + * - Location on VME bus is read. + * - Bits selected by mask are compared with compare. + * - Where a selected bit matches that in compare and are selected in swap, + * the bit is swapped. + * - Result written back to location on VME bus. + * + * Return: Bytes written on success, -EINVAL if resource is not a VME master + * resource or RMW operation is not supported. Hardware specific + * errors may also be returned. + */ +unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask, + unsigned int compare, unsigned int swap, loff_t offset) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_master_resource *image; + + if (!bridge->master_rmw) { + printk(KERN_WARNING "Writing to resource not supported\n"); + return -EINVAL; + } + + if (resource->type != VME_MASTER) { + printk(KERN_ERR "Not a master resource\n"); + return -EINVAL; + } + + image = list_entry(resource->entry, struct vme_master_resource, list); + + return bridge->master_rmw(image, mask, compare, swap, offset); +} +EXPORT_SYMBOL(vme_master_rmw); + +/** + * vme_master_mmap - Mmap region of VME master window. + * @resource: Pointer to VME master resource. + * @vma: Pointer to definition of user mapping. + * + * Memory map a region of the VME master window into user space. + * + * Return: Zero on success, -EINVAL if resource is not a VME master + * resource or -EFAULT if map exceeds window size. Other generic mmap + * errors may also be returned. 
+ */
+int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
+{
+ struct vme_master_resource *image;
+ phys_addr_t phys_addr;
+ unsigned long vma_size;
+
+ if (resource->type != VME_MASTER) {
+ pr_err("Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+ phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
+ vma_size = vma->vm_end - vma->vm_start;
+
+ if (phys_addr + vma_size > image->bus_resource.end + 1) {
+ pr_err("Map size cannot exceed the window size\n");
+ return -EFAULT;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
+}
+EXPORT_SYMBOL(vme_master_mmap);
+
+/**
+ * vme_master_free - Free VME master window
+ * @resource: Pointer to VME master resource.
+ *
+ * Free the provided master resource so that it may be reallocated.
+ */
+void vme_master_free(struct vme_resource *resource)
+{
+ struct vme_master_resource *master_image;
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return;
+ }
+
+ master_image = list_entry(resource->entry, struct vme_master_resource,
+ list);
+ if (!master_image) {
+ printk(KERN_ERR "Can't find master resource\n");
+ return;
+ }
+
+ /* Unlock image */
+ spin_lock(&master_image->lock);
+ if (master_image->locked == 0)
+ printk(KERN_ERR "Image is already free\n");
+
+ master_image->locked = 0;
+ spin_unlock(&master_image->lock);
+
+ /* Free up resource memory */
+ kfree(resource);
+}
+EXPORT_SYMBOL(vme_master_free);
+
+/**
+ * vme_dma_request - Request a DMA controller.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @route: Required src/destination combination.
+ *
+ * Request a VME DMA controller capable of performing transfers between the
+ * requested source/destination combination.
+ *
+ * Return: Pointer to VME DMA resource on success, NULL on failure.
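Taken together, the master window calls above are normally used in a request/set/access/free sequence. A minimal sketch, assuming a hypothetical caller holding a struct vme_dev (vdev); the A24 base address, 64 KiB window size and D16 width are illustrative only:

#include <linux/errno.h>
#include "vme.h"

static int example_master_read_word(struct vme_dev *vdev, u16 *val)
{
	struct vme_resource *res;
	ssize_t nbytes;
	int ret;

	res = vme_master_request(vdev, VME_A24,
				 VME_SCT | VME_USER | VME_DATA, VME_D16);
	if (!res)
		return -ENODEV;

	/* Point a 64 KiB window at A24 address 0x400000 and enable it */
	ret = vme_master_set(res, 1, 0x400000, 0x10000, VME_A24,
			     VME_SCT | VME_USER | VME_DATA, VME_D16);
	if (ret)
		goto out_free;

	/* Read one 16-bit word from the start of the window */
	nbytes = vme_master_read(res, val, sizeof(*val), 0);
	if (nbytes < 0)
		ret = nbytes;

out_free:
	vme_master_free(res);
	return ret;
}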
+ */ +struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route) +{ + struct vme_bridge *bridge; + struct list_head *dma_pos = NULL; + struct vme_dma_resource *allocated_ctrlr = NULL; + struct vme_dma_resource *dma_ctrlr = NULL; + struct vme_resource *resource = NULL; + + /* XXX Not checking resource attributes */ + printk(KERN_ERR "No VME resource Attribute tests done\n"); + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through DMA resources */ + list_for_each(dma_pos, &bridge->dma_resources) { + dma_ctrlr = list_entry(dma_pos, + struct vme_dma_resource, list); + if (!dma_ctrlr) { + printk(KERN_ERR "Registered NULL DMA resource\n"); + continue; + } + + /* Find an unlocked and compatible controller */ + mutex_lock(&dma_ctrlr->mtx); + if (((dma_ctrlr->route_attr & route) == route) && + (dma_ctrlr->locked == 0)) { + + dma_ctrlr->locked = 1; + mutex_unlock(&dma_ctrlr->mtx); + allocated_ctrlr = dma_ctrlr; + break; + } + mutex_unlock(&dma_ctrlr->mtx); + } + + /* Check to see if we found a resource */ + if (!allocated_ctrlr) + goto err_ctrlr; + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_DMA; + resource->entry = &allocated_ctrlr->list; + + return resource; + +err_alloc: + /* Unlock image */ + mutex_lock(&dma_ctrlr->mtx); + dma_ctrlr->locked = 0; + mutex_unlock(&dma_ctrlr->mtx); +err_ctrlr: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_dma_request); + +/** + * vme_new_dma_list - Create new VME DMA list. + * @resource: Pointer to VME DMA resource. + * + * Create a new VME DMA list. It is the responsibility of the user to free + * the list once it is no longer required with vme_dma_list_free(). + * + * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid + * VME DMA resource. + */ +struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource) +{ + struct vme_dma_list *dma_list; + + if (resource->type != VME_DMA) { + printk(KERN_ERR "Not a DMA resource\n"); + return NULL; + } + + dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL); + if (!dma_list) + return NULL; + + INIT_LIST_HEAD(&dma_list->entries); + dma_list->parent = list_entry(resource->entry, + struct vme_dma_resource, + list); + mutex_init(&dma_list->mtx); + + return dma_list; +} +EXPORT_SYMBOL(vme_new_dma_list); + +/** + * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute. + * @pattern: Value to use used as pattern + * @type: Type of pattern to be written. + * + * Create VME DMA list attribute for pattern generation. It is the + * responsibility of the user to free used attributes using + * vme_dma_free_attribute(). + * + * Return: Pointer to VME DMA attribute, NULL on failure. + */ +struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type) +{ + struct vme_dma_attr *attributes; + struct vme_dma_pattern *pattern_attr; + + attributes = kmalloc(sizeof(*attributes), GFP_KERNEL); + if (!attributes) + goto err_attr; + + pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL); + if (!pattern_attr) + goto err_pat; + + attributes->type = VME_DMA_PATTERN; + attributes->private = (void *)pattern_attr; + + pattern_attr->pattern = pattern; + pattern_attr->type = type; + + return attributes; + +err_pat: + kfree(attributes); +err_attr: + return NULL; +} +EXPORT_SYMBOL(vme_dma_pattern_attribute); + +/** + * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute. + * @address: PCI base address for DMA transfer. 
+ * + * Create VME DMA list attribute pointing to a location on PCI for DMA + * transfers. It is the responsibility of the user to free used attributes + * using vme_dma_free_attribute(). + * + * Return: Pointer to VME DMA attribute, NULL on failure. + */ +struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address) +{ + struct vme_dma_attr *attributes; + struct vme_dma_pci *pci_attr; + + /* XXX Run some sanity checks here */ + + attributes = kmalloc(sizeof(*attributes), GFP_KERNEL); + if (!attributes) + goto err_attr; + + pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL); + if (!pci_attr) + goto err_pci; + + attributes->type = VME_DMA_PCI; + attributes->private = (void *)pci_attr; + + pci_attr->address = address; + + return attributes; + +err_pci: + kfree(attributes); +err_attr: + return NULL; +} +EXPORT_SYMBOL(vme_dma_pci_attribute); + +/** + * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute. + * @address: VME base address for DMA transfer. + * @aspace: VME address space to use for DMA transfer. + * @cycle: VME bus cycle to use for DMA transfer. + * @dwidth: VME data width to use for DMA transfer. + * + * Create VME DMA list attribute pointing to a location on the VME bus for DMA + * transfers. It is the responsibility of the user to free used attributes + * using vme_dma_free_attribute(). + * + * Return: Pointer to VME DMA attribute, NULL on failure. + */ +struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address, + u32 aspace, u32 cycle, u32 dwidth) +{ + struct vme_dma_attr *attributes; + struct vme_dma_vme *vme_attr; + + attributes = kmalloc(sizeof(*attributes), GFP_KERNEL); + if (!attributes) + goto err_attr; + + vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL); + if (!vme_attr) + goto err_vme; + + attributes->type = VME_DMA_VME; + attributes->private = (void *)vme_attr; + + vme_attr->address = address; + vme_attr->aspace = aspace; + vme_attr->cycle = cycle; + vme_attr->dwidth = dwidth; + + return attributes; + +err_vme: + kfree(attributes); +err_attr: + return NULL; +} +EXPORT_SYMBOL(vme_dma_vme_attribute); + +/** + * vme_dma_free_attribute - Free DMA list attribute. + * @attributes: Pointer to DMA list attribute. + * + * Free VME DMA list attribute. VME DMA list attributes can be safely freed + * once vme_dma_list_add() has returned. + */ +void vme_dma_free_attribute(struct vme_dma_attr *attributes) +{ + kfree(attributes->private); + kfree(attributes); +} +EXPORT_SYMBOL(vme_dma_free_attribute); + +/** + * vme_dma_list_add - Add enty to a VME DMA list. + * @list: Pointer to VME list. + * @src: Pointer to DMA list attribute to use as source. + * @dest: Pointer to DMA list attribute to use as destination. + * @count: Number of bytes to transfer. + * + * Add an entry to the provided VME DMA list. Entry requires pointers to source + * and destination DMA attributes and a count. + * + * Please note, the attributes supported as source and destinations for + * transfers are hardware dependent. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device or if the link list has already been submitted for execution. + * Hardware specific errors also possible. 
+ */ +int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src, + struct vme_dma_attr *dest, size_t count) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_add) { + printk(KERN_WARNING "Link List DMA generation not supported\n"); + return -EINVAL; + } + + if (!mutex_trylock(&list->mtx)) { + printk(KERN_ERR "Link List already submitted\n"); + return -EINVAL; + } + + retval = bridge->dma_list_add(list, src, dest, count); + + mutex_unlock(&list->mtx); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_add); + +/** + * vme_dma_list_exec - Queue a VME DMA list for execution. + * @list: Pointer to VME list. + * + * Queue the provided VME DMA list for execution. The call will return once the + * list has been executed. + * + * Return: Zero on success, -EINVAL if operation is not supported on this + * device. Hardware specific errors also possible. + */ +int vme_dma_list_exec(struct vme_dma_list *list) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_exec) { + printk(KERN_ERR "Link List DMA execution not supported\n"); + return -EINVAL; + } + + mutex_lock(&list->mtx); + + retval = bridge->dma_list_exec(list); + + mutex_unlock(&list->mtx); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_exec); + +/** + * vme_dma_list_free - Free a VME DMA list. + * @list: Pointer to VME list. + * + * Free the provided DMA list and all its entries. + * + * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource + * is still in use. Hardware specific errors also possible. + */ +int vme_dma_list_free(struct vme_dma_list *list) +{ + struct vme_bridge *bridge = list->parent->parent; + int retval; + + if (!bridge->dma_list_empty) { + printk(KERN_WARNING "Emptying of Link Lists not supported\n"); + return -EINVAL; + } + + if (!mutex_trylock(&list->mtx)) { + printk(KERN_ERR "Link List in use\n"); + return -EBUSY; + } + + /* + * Empty out all of the entries from the DMA list. We need to go to the + * low level driver as DMA entries are driver specific. + */ + retval = bridge->dma_list_empty(list); + if (retval) { + printk(KERN_ERR "Unable to empty link-list entries\n"); + mutex_unlock(&list->mtx); + return retval; + } + mutex_unlock(&list->mtx); + kfree(list); + + return retval; +} +EXPORT_SYMBOL(vme_dma_list_free); + +/** + * vme_dma_free - Free a VME DMA resource. + * @resource: Pointer to VME DMA resource. + * + * Free the provided DMA resource so that it may be reallocated. + * + * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource + * is still active. 
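A usage sketch tying the DMA calls above together: a single list entry copying 4 KiB from VME A32 space into a PCI-addressable buffer. The VME address, cycle attributes and the dest_bus handle are assumptions for illustration, and error unwinding is deliberately compact:

#include <linux/errno.h>
#include "vme.h"

static int example_dma_copy(struct vme_dev *vdev, dma_addr_t dest_bus)
{
	struct vme_resource *dma_res;
	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dest;
	int ret;

	dma_res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
	if (!dma_res)
		return -ENODEV;

	list = vme_new_dma_list(dma_res);
	if (!list) {
		ret = -ENOMEM;
		goto out_dma;
	}

	src = vme_dma_vme_attribute(0x80000000ULL, VME_A32,
				    VME_SCT | VME_USER | VME_DATA, VME_D32);
	dest = vme_dma_pci_attribute(dest_bus);
	if (!src || !dest) {
		ret = -ENOMEM;
		goto out_attr;
	}

	ret = vme_dma_list_add(list, src, dest, 0x1000);
	if (!ret)
		ret = vme_dma_list_exec(list);	/* blocks until complete */

out_attr:
	if (src)
		vme_dma_free_attribute(src);
	if (dest)
		vme_dma_free_attribute(dest);
	vme_dma_list_free(list);
out_dma:
	vme_dma_free(dma_res);
	return ret;
}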
+ */ +int vme_dma_free(struct vme_resource *resource) +{ + struct vme_dma_resource *ctrlr; + + if (resource->type != VME_DMA) { + printk(KERN_ERR "Not a DMA resource\n"); + return -EINVAL; + } + + ctrlr = list_entry(resource->entry, struct vme_dma_resource, list); + + if (!mutex_trylock(&ctrlr->mtx)) { + printk(KERN_ERR "Resource busy, can't free\n"); + return -EBUSY; + } + + if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) { + printk(KERN_WARNING "Resource still processing transfers\n"); + mutex_unlock(&ctrlr->mtx); + return -EBUSY; + } + + ctrlr->locked = 0; + + mutex_unlock(&ctrlr->mtx); + + kfree(resource); + + return 0; +} +EXPORT_SYMBOL(vme_dma_free); + +void vme_bus_error_handler(struct vme_bridge *bridge, + unsigned long long address, int am) +{ + struct list_head *handler_pos = NULL; + struct vme_error_handler *handler; + int handler_triggered = 0; + u32 aspace = vme_get_aspace(am); + + list_for_each(handler_pos, &bridge->vme_error_handlers) { + handler = list_entry(handler_pos, struct vme_error_handler, + list); + if ((aspace == handler->aspace) && + (address >= handler->start) && + (address < handler->end)) { + if (!handler->num_errors) + handler->first_error = address; + if (handler->num_errors != UINT_MAX) + handler->num_errors++; + handler_triggered = 1; + } + } + + if (!handler_triggered) + dev_err(bridge->parent, + "Unhandled VME access error at address 0x%llx\n", + address); +} +EXPORT_SYMBOL(vme_bus_error_handler); + +struct vme_error_handler *vme_register_error_handler( + struct vme_bridge *bridge, u32 aspace, + unsigned long long address, size_t len) +{ + struct vme_error_handler *handler; + + handler = kmalloc(sizeof(*handler), GFP_ATOMIC); + if (!handler) + return NULL; + + handler->aspace = aspace; + handler->start = address; + handler->end = address + len; + handler->num_errors = 0; + handler->first_error = 0; + list_add_tail(&handler->list, &bridge->vme_error_handlers); + + return handler; +} +EXPORT_SYMBOL(vme_register_error_handler); + +void vme_unregister_error_handler(struct vme_error_handler *handler) +{ + list_del(&handler->list); + kfree(handler); +} +EXPORT_SYMBOL(vme_unregister_error_handler); + +void vme_irq_handler(struct vme_bridge *bridge, int level, int statid) +{ + void (*call)(int, int, void *); + void *priv_data; + + call = bridge->irq[level - 1].callback[statid].func; + priv_data = bridge->irq[level - 1].callback[statid].priv_data; + if (call) + call(level, statid, priv_data); + else + printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n", + level, statid); +} +EXPORT_SYMBOL(vme_irq_handler); + +/** + * vme_irq_request - Request a specific VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority being requested. + * @statid: Interrupt vector being requested. + * @callback: Pointer to callback function called when VME interrupt/vector + * received. + * @priv_data: Generic pointer that will be passed to the callback function. + * + * Request callback to be attached as a handler for VME interrupts with provided + * level and statid. + * + * Return: Zero on success, -EINVAL on invalid vme device, level or if the + * function is not supported, -EBUSY if the level/statid combination is + * already in use. Hardware specific errors also possible. 
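The error handler and interrupt dispatch hooks above are intended for bridge drivers rather than device drivers. A brief, hedged sketch of how a hypothetical bridge driver might use them; the window coordinates are illustrative:

#include "vme_bridge.h"

/* Track bus errors hitting a window the bridge driver has just programmed */
static struct vme_error_handler *example_track_window(struct vme_bridge *bridge)
{
	return vme_register_error_handler(bridge, VME_A32,
					  0x80000000ULL, 0x10000);
}

/* Called from the bridge's bus-error interrupt path */
static void example_bus_error(struct vme_bridge *bridge,
			      unsigned long long addr, int am)
{
	/* Records the fault against a matching handler, or logs it */
	vme_bus_error_handler(bridge, addr, am);
}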
+ */ +int vme_irq_request(struct vme_dev *vdev, int level, int statid, + void (*callback)(int, int, void *), + void *priv_data) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_ERR "Invalid interrupt level\n"); + return -EINVAL; + } + + if (!bridge->irq_set) { + printk(KERN_ERR "Configuring interrupts not supported\n"); + return -EINVAL; + } + + mutex_lock(&bridge->irq_mtx); + + if (bridge->irq[level - 1].callback[statid].func) { + mutex_unlock(&bridge->irq_mtx); + printk(KERN_WARNING "VME Interrupt already taken\n"); + return -EBUSY; + } + + bridge->irq[level - 1].count++; + bridge->irq[level - 1].callback[statid].priv_data = priv_data; + bridge->irq[level - 1].callback[statid].func = callback; + + /* Enable IRQ level */ + bridge->irq_set(bridge, level, 1, 1); + + mutex_unlock(&bridge->irq_mtx); + + return 0; +} +EXPORT_SYMBOL(vme_irq_request); + +/** + * vme_irq_free - Free a VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority of interrupt being freed. + * @statid: Interrupt vector of interrupt being freed. + * + * Remove previously attached callback from VME interrupt priority/vector. + */ +void vme_irq_free(struct vme_dev *vdev, int level, int statid) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_ERR "Invalid interrupt level\n"); + return; + } + + if (!bridge->irq_set) { + printk(KERN_ERR "Configuring interrupts not supported\n"); + return; + } + + mutex_lock(&bridge->irq_mtx); + + bridge->irq[level - 1].count--; + + /* Disable IRQ level if no more interrupts attached at this level*/ + if (bridge->irq[level - 1].count == 0) + bridge->irq_set(bridge, level, 0, 1); + + bridge->irq[level - 1].callback[statid].func = NULL; + bridge->irq[level - 1].callback[statid].priv_data = NULL; + + mutex_unlock(&bridge->irq_mtx); +} +EXPORT_SYMBOL(vme_irq_free); + +/** + * vme_irq_generate - Generate VME interrupt. + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * @level: Interrupt priority at which to assert the interrupt. + * @statid: Interrupt vector to associate with the interrupt. + * + * Generate a VME interrupt of the provided level and with the provided + * statid. + * + * Return: Zero on success, -EINVAL on invalid vme device, level or if the + * function is not supported. Hardware specific errors also possible. + */ +int vme_irq_generate(struct vme_dev *vdev, int level, int statid) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if ((level < 1) || (level > 7)) { + printk(KERN_WARNING "Invalid interrupt level\n"); + return -EINVAL; + } + + if (!bridge->irq_generate) { + printk(KERN_WARNING "Interrupt generation not supported\n"); + return -EINVAL; + } + + return bridge->irq_generate(bridge, level, statid); +} +EXPORT_SYMBOL(vme_irq_generate); + +/** + * vme_lm_request - Request a VME location monitor + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Allocate a location monitor resource to the driver. A location monitor + * allows the driver to monitor accesses to a contiguous number of + * addresses on the VME bus. + * + * Return: Pointer to a VME resource on success or NULL on failure. 
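A short sketch of the interrupt API above from a device driver's point of view; the level (3) and vector (0x20) are illustrative values a real board would define:

#include <linux/device.h>
#include "vme.h"

static void example_vme_isr(int level, int statid, void *priv)
{
	struct vme_dev *vdev = priv;

	dev_info(&vdev->dev, "VME interrupt, level %d vector 0x%x\n",
		 level, statid);
}

static int example_irq_setup(struct vme_dev *vdev)
{
	/* Attach handler to level 3, vector 0x20 */
	return vme_irq_request(vdev, 3, 0x20, example_vme_isr, vdev);
}

static void example_irq_teardown(struct vme_dev *vdev)
{
	vme_irq_free(vdev, 3, 0x20);
}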
+ */ +struct vme_resource *vme_lm_request(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + struct list_head *lm_pos = NULL; + struct vme_lm_resource *allocated_lm = NULL; + struct vme_lm_resource *lm = NULL; + struct vme_resource *resource = NULL; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + goto err_bus; + } + + /* Loop through LM resources */ + list_for_each(lm_pos, &bridge->lm_resources) { + lm = list_entry(lm_pos, + struct vme_lm_resource, list); + if (!lm) { + printk(KERN_ERR "Registered NULL Location Monitor resource\n"); + continue; + } + + /* Find an unlocked controller */ + mutex_lock(&lm->mtx); + if (lm->locked == 0) { + lm->locked = 1; + mutex_unlock(&lm->mtx); + allocated_lm = lm; + break; + } + mutex_unlock(&lm->mtx); + } + + /* Check to see if we found a resource */ + if (!allocated_lm) + goto err_lm; + + resource = kmalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + goto err_alloc; + + resource->type = VME_LM; + resource->entry = &allocated_lm->list; + + return resource; + +err_alloc: + /* Unlock image */ + mutex_lock(&lm->mtx); + lm->locked = 0; + mutex_unlock(&lm->mtx); +err_lm: +err_bus: + return NULL; +} +EXPORT_SYMBOL(vme_lm_request); + +/** + * vme_lm_count - Determine number of VME Addresses monitored + * @resource: Pointer to VME location monitor resource. + * + * The number of contiguous addresses monitored is hardware dependent. + * Return the number of contiguous addresses monitored by the + * location monitor. + * + * Return: Count of addresses monitored or -EINVAL when provided with an + * invalid location monitor resource. + */ +int vme_lm_count(struct vme_resource *resource) +{ + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return -EINVAL; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + return lm->monitors; +} +EXPORT_SYMBOL(vme_lm_count); + +/** + * vme_lm_set - Configure location monitor + * @resource: Pointer to VME location monitor resource. + * @lm_base: Base address to monitor. + * @aspace: VME address space to monitor. + * @cycle: VME bus cycle type to monitor. + * + * Set the base address, address space and cycle type of accesses to be + * monitored by the location monitor. + * + * Return: Zero on success, -EINVAL when provided with an invalid location + * monitor resource or function is not supported. Hardware specific + * errors may also be returned. + */ +int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base, + u32 aspace, u32 cycle) +{ + struct vme_bridge *bridge = find_bridge(resource); + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return -EINVAL; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + if (!bridge->lm_set) { + printk(KERN_ERR "vme_lm_set not supported\n"); + return -EINVAL; + } + + return bridge->lm_set(lm, lm_base, aspace, cycle); +} +EXPORT_SYMBOL(vme_lm_set); + +/** + * vme_lm_get - Retrieve location monitor settings + * @resource: Pointer to VME location monitor resource. + * @lm_base: Pointer used to output the base address monitored. + * @aspace: Pointer used to output the address space monitored. + * @cycle: Pointer used to output the VME bus cycle type monitored. + * + * Retrieve the base address, address space and cycle type of accesses to + * be monitored by the location monitor. 
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
+int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
+ u32 *aspace, u32 *cycle)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (!bridge->lm_get) {
+ printk(KERN_ERR "vme_lm_get not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_get(lm, lm_base, aspace, cycle);
+}
+EXPORT_SYMBOL(vme_lm_get);
+
+/**
+ * vme_lm_attach - Provide callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset to which callback should be attached.
+ * @callback: Pointer to callback function called when triggered.
+ * @data: Generic pointer that will be passed to the callback function.
+ *
+ * Attach a callback to the specified offset into the location monitor's
+ * monitored addresses. A generic pointer is provided to allow data to be
+ * passed to the callback when called.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
+int vme_lm_attach(struct vme_resource *resource, int monitor,
+ void (*callback)(void *), void *data)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (!bridge->lm_attach) {
+ printk(KERN_ERR "vme_lm_attach not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_attach(lm, monitor, callback, data);
+}
+EXPORT_SYMBOL(vme_lm_attach);
+
+/**
+ * vme_lm_detach - Remove callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset from which the callback should be removed.
+ *
+ * Remove the callback associated with the specified offset into the
+ * location monitor's monitored addresses.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
+int vme_lm_detach(struct vme_resource *resource, int monitor)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (!bridge->lm_detach) {
+ printk(KERN_ERR "vme_lm_detach not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_detach(lm, monitor);
+}
+EXPORT_SYMBOL(vme_lm_detach);
+
+/**
+ * vme_lm_free - Free allocated VME location monitor
+ * @resource: Pointer to VME location monitor resource.
+ *
+ * Free allocation of a VME location monitor.
+ *
+ * WARNING: This function currently expects that any callbacks that have
+ * been attached to the location monitor have been removed.
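A hedged sketch of the location monitor API above; the A24 address and cycle attributes are illustrative, and, per the warning above, callbacks are detached before the monitor is freed:

#include <linux/printk.h>
#include "vme.h"

static void example_lm_hit(void *data)
{
	pr_info("location monitor hit: %s\n", (char *)data);
}

static struct vme_resource *example_lm_setup(struct vme_dev *vdev)
{
	struct vme_resource *lm;

	lm = vme_lm_request(vdev);
	if (!lm)
		return NULL;

	/* Monitor A24 accesses around 0x600000, report hits on slot 0 */
	if (vme_lm_count(lm) < 1 ||
	    vme_lm_set(lm, 0x600000, VME_A24,
		       VME_SCT | VME_USER | VME_DATA) ||
	    vme_lm_attach(lm, 0, example_lm_hit, "slot 0")) {
		vme_lm_free(lm);
		return NULL;
	}

	return lm;
}

static void example_lm_teardown(struct vme_resource *lm)
{
	/* Detach before freeing, as required above */
	vme_lm_detach(lm, 0);
	vme_lm_free(lm);
}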
+ */ +void vme_lm_free(struct vme_resource *resource) +{ + struct vme_lm_resource *lm; + + if (resource->type != VME_LM) { + printk(KERN_ERR "Not a Location Monitor resource\n"); + return; + } + + lm = list_entry(resource->entry, struct vme_lm_resource, list); + + mutex_lock(&lm->mtx); + + /* XXX + * Check to see that there aren't any callbacks still attached, if + * there are we should probably be detaching them! + */ + + lm->locked = 0; + + mutex_unlock(&lm->mtx); + + kfree(resource); +} +EXPORT_SYMBOL(vme_lm_free); + +/** + * vme_slot_num - Retrieve slot ID + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Retrieve the slot ID associated with the provided VME device. + * + * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined + * or the function is not supported. Hardware specific errors may also + * be returned. + */ +int vme_slot_num(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + printk(KERN_ERR "Can't find VME bus\n"); + return -EINVAL; + } + + if (!bridge->slot_get) { + printk(KERN_WARNING "vme_slot_num not supported\n"); + return -EINVAL; + } + + return bridge->slot_get(bridge); +} +EXPORT_SYMBOL(vme_slot_num); + +/** + * vme_bus_num - Retrieve bus number + * @vdev: Pointer to VME device struct vme_dev assigned to driver instance. + * + * Retrieve the bus enumeration associated with the provided VME device. + * + * Return: The bus number on success, -EINVAL if VME bridge cannot be + * determined. + */ +int vme_bus_num(struct vme_dev *vdev) +{ + struct vme_bridge *bridge; + + bridge = vdev->bridge; + if (!bridge) { + pr_err("Can't find VME bus\n"); + return -EINVAL; + } + + return bridge->num; +} +EXPORT_SYMBOL(vme_bus_num); + +/* - Bridge Registration --------------------------------------------------- */ + +static void vme_dev_release(struct device *dev) +{ + kfree(dev_to_vme_dev(dev)); +} + +/* Common bridge initialization */ +struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge) +{ + INIT_LIST_HEAD(&bridge->vme_error_handlers); + INIT_LIST_HEAD(&bridge->master_resources); + INIT_LIST_HEAD(&bridge->slave_resources); + INIT_LIST_HEAD(&bridge->dma_resources); + INIT_LIST_HEAD(&bridge->lm_resources); + mutex_init(&bridge->irq_mtx); + + return bridge; +} +EXPORT_SYMBOL(vme_init_bridge); + +int vme_register_bridge(struct vme_bridge *bridge) +{ + int i; + int ret = -1; + + mutex_lock(&vme_buses_lock); + for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) { + if ((vme_bus_numbers & (1 << i)) == 0) { + vme_bus_numbers |= (1 << i); + bridge->num = i; + INIT_LIST_HEAD(&bridge->devices); + list_add_tail(&bridge->bus_list, &vme_bus_list); + ret = 0; + break; + } + } + mutex_unlock(&vme_buses_lock); + + return ret; +} +EXPORT_SYMBOL(vme_register_bridge); + +void vme_unregister_bridge(struct vme_bridge *bridge) +{ + struct vme_dev *vdev; + struct vme_dev *tmp; + + mutex_lock(&vme_buses_lock); + vme_bus_numbers &= ~(1 << bridge->num); + list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) { + list_del(&vdev->drv_list); + list_del(&vdev->bridge_list); + device_unregister(&vdev->dev); + } + list_del(&bridge->bus_list); + mutex_unlock(&vme_buses_lock); +} +EXPORT_SYMBOL(vme_unregister_bridge); + +/* - Driver Registration --------------------------------------------------- */ + +static int __vme_register_driver_bus(struct vme_driver *drv, + struct vme_bridge *bridge, unsigned int ndevs) +{ + int err; + unsigned int i; + struct vme_dev *vdev; + struct vme_dev *tmp; + + 
for (i = 0; i < ndevs; i++) { + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + err = -ENOMEM; + goto err_devalloc; + } + vdev->num = i; + vdev->bridge = bridge; + vdev->dev.platform_data = drv; + vdev->dev.release = vme_dev_release; + vdev->dev.parent = bridge->parent; + vdev->dev.bus = &vme_bus_type; + dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num, + vdev->num); + + err = device_register(&vdev->dev); + if (err) + goto err_reg; + + if (vdev->dev.platform_data) { + list_add_tail(&vdev->drv_list, &drv->devices); + list_add_tail(&vdev->bridge_list, &bridge->devices); + } else + device_unregister(&vdev->dev); + } + return 0; + +err_reg: + put_device(&vdev->dev); +err_devalloc: + list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) { + list_del(&vdev->drv_list); + list_del(&vdev->bridge_list); + device_unregister(&vdev->dev); + } + return err; +} + +static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs) +{ + struct vme_bridge *bridge; + int err = 0; + + mutex_lock(&vme_buses_lock); + list_for_each_entry(bridge, &vme_bus_list, bus_list) { + /* + * This cannot cause trouble as we already have vme_buses_lock + * and if the bridge is removed, it will have to go through + * vme_unregister_bridge() to do it (which calls remove() on + * the bridge which in turn tries to acquire vme_buses_lock and + * will have to wait). + */ + err = __vme_register_driver_bus(drv, bridge, ndevs); + if (err) + break; + } + mutex_unlock(&vme_buses_lock); + return err; +} + +/** + * vme_register_driver - Register a VME driver + * @drv: Pointer to VME driver structure to register. + * @ndevs: Maximum number of devices to allow to be enumerated. + * + * Register a VME device driver with the VME subsystem. + * + * Return: Zero on success, error value on registration failure. + */ +int vme_register_driver(struct vme_driver *drv, unsigned int ndevs) +{ + int err; + + drv->driver.name = drv->name; + drv->driver.bus = &vme_bus_type; + INIT_LIST_HEAD(&drv->devices); + + err = driver_register(&drv->driver); + if (err) + return err; + + err = __vme_register_driver(drv, ndevs); + if (err) + driver_unregister(&drv->driver); + + return err; +} +EXPORT_SYMBOL(vme_register_driver); + +/** + * vme_unregister_driver - Unregister a VME driver + * @drv: Pointer to VME driver structure to unregister. + * + * Unregister a VME device driver from the VME subsystem. 
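Pulling the registration API together, a skeleton device driver might look like the following; the "vme_example" name, the match policy and the ndevs value of 1 are all illustrative choices:

#include <linux/device.h>
#include <linux/module.h>
#include "vme.h"

static int example_match(struct vme_dev *vdev)
{
	/* Bind only the first enumerated device on each bridge */
	return vdev->num == 0;
}

static int example_probe(struct vme_dev *vdev)
{
	dev_info(&vdev->dev, "probed on bus %d, slot %d\n",
		 vme_bus_num(vdev), vme_slot_num(vdev));
	return 0;
}

static void example_remove(struct vme_dev *vdev)
{
	dev_info(&vdev->dev, "removed\n");
}

static struct vme_driver example_driver = {
	.name = "vme_example",
	.match = example_match,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	/* Ask the core to enumerate at most one device per bridge */
	return vme_register_driver(&example_driver, 1);
}

static void __exit example_exit(void)
{
	vme_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");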
+ */ +void vme_unregister_driver(struct vme_driver *drv) +{ + struct vme_dev *dev, *dev_tmp; + + mutex_lock(&vme_buses_lock); + list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) { + list_del(&dev->drv_list); + list_del(&dev->bridge_list); + device_unregister(&dev->dev); + } + mutex_unlock(&vme_buses_lock); + + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(vme_unregister_driver); + +/* - Bus Registration ------------------------------------------------------ */ + +static int vme_bus_match(struct device *dev, struct device_driver *drv) +{ + struct vme_driver *vme_drv; + + vme_drv = container_of(drv, struct vme_driver, driver); + + if (dev->platform_data == vme_drv) { + struct vme_dev *vdev = dev_to_vme_dev(dev); + + if (vme_drv->match && vme_drv->match(vdev)) + return 1; + + dev->platform_data = NULL; + } + return 0; +} + +static int vme_bus_probe(struct device *dev) +{ + struct vme_driver *driver; + struct vme_dev *vdev = dev_to_vme_dev(dev); + + driver = dev->platform_data; + if (driver->probe) + return driver->probe(vdev); + + return -ENODEV; +} + +static void vme_bus_remove(struct device *dev) +{ + struct vme_driver *driver; + struct vme_dev *vdev = dev_to_vme_dev(dev); + + driver = dev->platform_data; + if (driver->remove) + driver->remove(vdev); +} + +struct bus_type vme_bus_type = { + .name = "vme", + .match = vme_bus_match, + .probe = vme_bus_probe, + .remove = vme_bus_remove, +}; +EXPORT_SYMBOL(vme_bus_type); + +static int __init vme_init(void) +{ + return bus_register(&vme_bus_type); +} +subsys_initcall(vme_init); diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h new file mode 100644 index 000000000000..b204a9b4be1b --- /dev/null +++ b/drivers/staging/vme_user/vme.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VME_H_ +#define _VME_H_ + +/* Resource Type */ +enum vme_resource_type { + VME_MASTER, + VME_SLAVE, + VME_DMA, + VME_LM +}; + +/* VME Address Spaces */ +#define VME_A16 0x1 +#define VME_A24 0x2 +#define VME_A32 0x4 +#define VME_A64 0x8 +#define VME_CRCSR 0x10 +#define VME_USER1 0x20 +#define VME_USER2 0x40 +#define VME_USER3 0x80 +#define VME_USER4 0x100 + +#define VME_A16_MAX 0x10000ULL +#define VME_A24_MAX 0x1000000ULL +#define VME_A32_MAX 0x100000000ULL +#define VME_A64_MAX 0x10000000000000000ULL +#define VME_CRCSR_MAX 0x1000000ULL + + +/* VME Cycle Types */ +#define VME_SCT 0x1 +#define VME_BLT 0x2 +#define VME_MBLT 0x4 +#define VME_2eVME 0x8 +#define VME_2eSST 0x10 +#define VME_2eSSTB 0x20 + +#define VME_2eSST160 0x100 +#define VME_2eSST267 0x200 +#define VME_2eSST320 0x400 + +#define VME_SUPER 0x1000 +#define VME_USER 0x2000 +#define VME_PROG 0x4000 +#define VME_DATA 0x8000 + +/* VME Data Widths */ +#define VME_D8 0x1 +#define VME_D16 0x2 +#define VME_D32 0x4 +#define VME_D64 0x8 + +/* Arbitration Scheduling Modes */ +#define VME_R_ROBIN_MODE 0x1 +#define VME_PRIORITY_MODE 0x2 + +#define VME_DMA_PATTERN (1<<0) +#define VME_DMA_PCI (1<<1) +#define VME_DMA_VME (1<<2) + +#define VME_DMA_PATTERN_BYTE (1<<0) +#define VME_DMA_PATTERN_WORD (1<<1) +#define VME_DMA_PATTERN_INCREMENT (1<<2) + +#define VME_DMA_VME_TO_MEM (1<<0) +#define VME_DMA_MEM_TO_VME (1<<1) +#define VME_DMA_VME_TO_VME (1<<2) +#define VME_DMA_MEM_TO_MEM (1<<3) +#define VME_DMA_PATTERN_TO_VME (1<<4) +#define VME_DMA_PATTERN_TO_MEM (1<<5) + +struct vme_dma_attr { + u32 type; + void *private; +}; + +struct vme_resource { + enum vme_resource_type type; + struct list_head *entry; +}; + +extern struct bus_type vme_bus_type; + +/* Number of VME 
interrupt vectors */ +#define VME_NUM_STATUSID 256 + +/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ +#define VME_MAX_BRIDGES (sizeof(unsigned int)*8) +#define VME_MAX_SLOTS 32 + +#define VME_SLOT_CURRENT -1 +#define VME_SLOT_ALL -2 + +/** + * struct vme_dev - Structure representing a VME device + * @num: The device number + * @bridge: Pointer to the bridge device this device is on + * @dev: Internal device structure + * @drv_list: List of devices (per driver) + * @bridge_list: List of devices (per bridge) + */ +struct vme_dev { + int num; + struct vme_bridge *bridge; + struct device dev; + struct list_head drv_list; + struct list_head bridge_list; +}; + +/** + * struct vme_driver - Structure representing a VME driver + * @name: Driver name, should be unique among VME drivers and usually the same + * as the module name. + * @match: Callback used to determine whether probe should be run. + * @probe: Callback for device binding, called when new device is detected. + * @remove: Callback, called on device removal. + * @driver: Underlying generic device driver structure. + * @devices: List of VME devices (struct vme_dev) associated with this driver. + */ +struct vme_driver { + const char *name; + int (*match)(struct vme_dev *); + int (*probe)(struct vme_dev *); + void (*remove)(struct vme_dev *); + struct device_driver driver; + struct list_head devices; +}; + +void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *); +void vme_free_consistent(struct vme_resource *, size_t, void *, + dma_addr_t); + +size_t vme_get_size(struct vme_resource *); +int vme_check_window(u32 aspace, unsigned long long vme_base, + unsigned long long size); + +struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); +int vme_slave_set(struct vme_resource *, int, unsigned long long, + unsigned long long, dma_addr_t, u32, u32); +int vme_slave_get(struct vme_resource *, int *, unsigned long long *, + unsigned long long *, dma_addr_t *, u32 *, u32 *); +void vme_slave_free(struct vme_resource *); + +struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32); +int vme_master_set(struct vme_resource *, int, unsigned long long, + unsigned long long, u32, u32, u32); +int vme_master_get(struct vme_resource *, int *, unsigned long long *, + unsigned long long *, u32 *, u32 *, u32 *); +ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t); +ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t); +unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int, + unsigned int, loff_t); +int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma); +void vme_master_free(struct vme_resource *); + +struct vme_resource *vme_dma_request(struct vme_dev *, u32); +struct vme_dma_list *vme_new_dma_list(struct vme_resource *); +struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32); +struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t); +struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32); +void vme_dma_free_attribute(struct vme_dma_attr *); +int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *, + struct vme_dma_attr *, size_t); +int vme_dma_list_exec(struct vme_dma_list *); +int vme_dma_list_free(struct vme_dma_list *); +int vme_dma_free(struct vme_resource *); + +int vme_irq_request(struct vme_dev *, int, int, + void (*callback)(int, int, void *), void *); +void vme_irq_free(struct vme_dev *, int, int); +int vme_irq_generate(struct vme_dev *, int, int); + +struct vme_resource 
*vme_lm_request(struct vme_dev *); +int vme_lm_count(struct vme_resource *); +int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32); +int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *); +int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *); +int vme_lm_detach(struct vme_resource *, int); +void vme_lm_free(struct vme_resource *); + +int vme_slot_num(struct vme_dev *); +int vme_bus_num(struct vme_dev *); + +int vme_register_driver(struct vme_driver *, unsigned int); +void vme_unregister_driver(struct vme_driver *); + + +#endif /* _VME_H_ */ + diff --git a/drivers/staging/vme_user/vme_bridge.h b/drivers/staging/vme_user/vme_bridge.h new file mode 100644 index 000000000000..0bbefe9851d7 --- /dev/null +++ b/drivers/staging/vme_user/vme_bridge.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VME_BRIDGE_H_ +#define _VME_BRIDGE_H_ + +#include "vme.h" + +#define VME_CRCSR_BUF_SIZE (508*1024) +/* + * Resource structures + */ +struct vme_master_resource { + struct list_head list; + struct vme_bridge *parent; + /* + * We are likely to need to access the VME bus in interrupt context, so + * protect master routines with a spinlock rather than a mutex. + */ + spinlock_t lock; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; + u32 width_attr; + struct resource bus_resource; + void __iomem *kern_base; +}; + +struct vme_slave_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; +}; + +struct vme_dma_pattern { + u32 pattern; + u32 type; +}; + +struct vme_dma_pci { + dma_addr_t address; +}; + +struct vme_dma_vme { + unsigned long long address; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +struct vme_dma_list { + struct list_head list; + struct vme_dma_resource *parent; + struct list_head entries; + struct mutex mtx; +}; + +struct vme_dma_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + struct list_head pending; + struct list_head running; + u32 route_attr; +}; + +struct vme_lm_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + int monitors; +}; + +struct vme_error_handler { + struct list_head list; + unsigned long long start; /* Beginning of error window */ + unsigned long long end; /* End of error window */ + unsigned long long first_error; /* Address of the first error */ + u32 aspace; /* Address space of error window*/ + unsigned num_errors; /* Number of errors */ +}; + +struct vme_callback { + void (*func)(int, int, void*); + void *priv_data; +}; + +struct vme_irq { + int count; + struct vme_callback callback[VME_NUM_STATUSID]; +}; + +/* Allow 16 characters for name (including null character) */ +#define VMENAMSIZ 16 + +/* This structure stores all the information about one bridge + * The structure should be dynamically allocated by the driver and one instance + * of the structure should be present for each VME chip present in the system. + */ +struct vme_bridge { + char name[VMENAMSIZ]; + int num; + struct list_head master_resources; + struct list_head slave_resources; + struct list_head dma_resources; + struct list_head lm_resources; + + /* List for registered errors handlers */ + struct list_head vme_error_handlers; + /* List of devices on this bridge */ + struct list_head devices; + + /* Bridge Info - XXX Move to private structure? 
*/ + struct device *parent; /* Parent device (eg. pdev->dev for PCI) */ + void *driver_priv; /* Private pointer for the bridge driver */ + struct list_head bus_list; /* list of VME buses */ + + /* Interrupt callbacks */ + struct vme_irq irq[7]; + /* Locking for VME irq callback configuration */ + struct mutex irq_mtx; + + /* Slave Functions */ + int (*slave_get) (struct vme_slave_resource *, int *, + unsigned long long *, unsigned long long *, dma_addr_t *, + u32 *, u32 *); + int (*slave_set) (struct vme_slave_resource *, int, unsigned long long, + unsigned long long, dma_addr_t, u32, u32); + + /* Master Functions */ + int (*master_get) (struct vme_master_resource *, int *, + unsigned long long *, unsigned long long *, u32 *, u32 *, + u32 *); + int (*master_set) (struct vme_master_resource *, int, + unsigned long long, unsigned long long, u32, u32, u32); + ssize_t (*master_read) (struct vme_master_resource *, void *, size_t, + loff_t); + ssize_t (*master_write) (struct vme_master_resource *, void *, size_t, + loff_t); + unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int, + unsigned int, unsigned int, loff_t); + + /* DMA Functions */ + int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *, + struct vme_dma_attr *, size_t); + int (*dma_list_exec) (struct vme_dma_list *); + int (*dma_list_empty) (struct vme_dma_list *); + + /* Interrupt Functions */ + void (*irq_set) (struct vme_bridge *, int, int, int); + int (*irq_generate) (struct vme_bridge *, int, int); + + /* Location monitor functions */ + int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32); + int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *, + u32 *); + int (*lm_attach)(struct vme_lm_resource *, int, + void (*callback)(void *), void *); + int (*lm_detach) (struct vme_lm_resource *, int); + + /* CR/CSR space functions */ + int (*slot_get) (struct vme_bridge *); + + /* Bridge parent interface */ + void *(*alloc_consistent)(struct device *dev, size_t size, + dma_addr_t *dma); + void (*free_consistent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma); +}; + +void vme_bus_error_handler(struct vme_bridge *bridge, + unsigned long long address, int am); +void vme_irq_handler(struct vme_bridge *, int, int); + +struct vme_bridge *vme_init_bridge(struct vme_bridge *); +int vme_register_bridge(struct vme_bridge *); +void vme_unregister_bridge(struct vme_bridge *); +struct vme_error_handler *vme_register_error_handler( + struct vme_bridge *bridge, u32 aspace, + unsigned long long address, size_t len); +void vme_unregister_error_handler(struct vme_error_handler *handler); + +#endif /* _VME_BRIDGE_H_ */ diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c new file mode 100644 index 000000000000..dd646b0c531d --- /dev/null +++ b/drivers/staging/vme_user/vme_fake.c @@ -0,0 +1,1305 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Fake VME bridge support. + * + * This drive provides a fake VME bridge chip, this enables debugging of the + * VME framework in the absence of a VME system. + * + * This driver has to do a number of things in software that would be driven + * by hardware if it was available, it will also result in extra overhead at + * times when compared with driving actual hardware. + * + * Author: Martyn Welch <martyn@welches.me.uk> + * Copyright (c) 2014 Martyn Welch + * + * Based on vme_tsi148.c: + * + * Author: Martyn Welch <martyn.welch@ge.com> + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 
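On the other side of the interface defined above, a hardware bridge driver fills in a struct vme_bridge with the operations its chip supports and registers it with the core. A compressed, hypothetical sketch follows; the fake bridge driver below is the complete in-tree reference:

#include <linux/errno.h>
#include <linux/slab.h>
#include "vme_bridge.h"

static int example_bridge_probe(struct device *parent)
{
	struct vme_bridge *bridge;
	int ret;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	/* Initialise the resource lists and the interrupt mutex */
	vme_init_bridge(bridge);
	bridge->parent = parent;

	/*
	 * A real driver would point slave_set/slave_get, master_set/
	 * master_get/master_read/master_write, irq_set/irq_generate and
	 * friends at its hardware-specific implementations and populate
	 * the master/slave/dma/lm resource lists before registering.
	 */

	ret = vme_register_bridge(bridge);
	if (ret)
		kfree(bridge);
	return ret;
}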
+ * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. + */ + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "vme.h" +#include "vme_bridge.h" + +/* + * Define the number of each that the fake driver supports. + */ +#define FAKE_MAX_MASTER 8 /* Max Master Windows */ +#define FAKE_MAX_SLAVE 8 /* Max Slave Windows */ + +/* Structures to hold information normally held in device registers */ +struct fake_slave_window { + int enabled; + unsigned long long vme_base; + unsigned long long size; + void *buf_base; + u32 aspace; + u32 cycle; +}; + +struct fake_master_window { + int enabled; + unsigned long long vme_base; + unsigned long long size; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +/* Structure used to hold driver specific information */ +struct fake_driver { + struct vme_bridge *parent; + struct fake_slave_window slaves[FAKE_MAX_SLAVE]; + struct fake_master_window masters[FAKE_MAX_MASTER]; + u32 lm_enabled; + unsigned long long lm_base; + u32 lm_aspace; + u32 lm_cycle; + void (*lm_callback[4])(void *); + void *lm_data[4]; + struct tasklet_struct int_tasklet; + int int_level; + int int_statid; + void *crcsr_kernel; + dma_addr_t crcsr_bus; + /* Only one VME interrupt can be generated at a time, provide locking */ + struct mutex vme_int; +}; + +/* Module parameter */ +static int geoid; + +static const char driver_name[] = "vme_fake"; + +static struct vme_bridge *exit_pointer; + +static struct device *vme_root; + +/* + * Calling VME bus interrupt callback if provided. + */ +static void fake_VIRQ_tasklet(unsigned long data) +{ + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = (struct vme_bridge *) data; + bridge = fake_bridge->driver_priv; + + vme_irq_handler(fake_bridge, bridge->int_level, bridge->int_statid); +} + +/* + * Configure VME interrupt + */ +static void fake_irq_set(struct vme_bridge *fake_bridge, int level, + int state, int sync) +{ + /* Nothing to do */ +} + +static void *fake_pci_to_ptr(dma_addr_t addr) +{ + return (void *)(uintptr_t)addr; +} + +static dma_addr_t fake_ptr_to_pci(void *addr) +{ + return (dma_addr_t)(uintptr_t)addr; +} + +/* + * Generate a VME bus interrupt at the requested level & vector. Wait for + * interrupt to be acked. + */ +static int fake_irq_generate(struct vme_bridge *fake_bridge, int level, + int statid) +{ + struct fake_driver *bridge; + + bridge = fake_bridge->driver_priv; + + mutex_lock(&bridge->vme_int); + + bridge->int_level = level; + + bridge->int_statid = statid; + + /* + * Schedule tasklet to run VME handler to emulate normal VME interrupt + * handler behaviour. + */ + tasklet_schedule(&bridge->int_tasklet); + + mutex_unlock(&bridge->vme_int); + + return 0; +} + +/* + * Initialize a slave window with the requested attributes. 
+ */ +static int fake_slave_set(struct vme_slave_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, + dma_addr_t buf_base, u32 aspace, u32 cycle) +{ + unsigned int i, granularity = 0; + unsigned long long vme_bound; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = image->parent; + bridge = fake_bridge->driver_priv; + + i = image->number; + + switch (aspace) { + case VME_A16: + granularity = 0x10; + break; + case VME_A24: + granularity = 0x1000; + break; + case VME_A32: + granularity = 0x10000; + break; + case VME_A64: + granularity = 0x10000; + break; + case VME_CRCSR: + case VME_USER1: + case VME_USER2: + case VME_USER3: + case VME_USER4: + default: + pr_err("Invalid address space\n"); + return -EINVAL; + } + + /* + * Bound address is a valid address for the window, adjust + * accordingly + */ + vme_bound = vme_base + size - granularity; + + if (vme_base & (granularity - 1)) { + pr_err("Invalid VME base alignment\n"); + return -EINVAL; + } + if (vme_bound & (granularity - 1)) { + pr_err("Invalid VME bound alignment\n"); + return -EINVAL; + } + + mutex_lock(&image->mtx); + + bridge->slaves[i].enabled = enabled; + bridge->slaves[i].vme_base = vme_base; + bridge->slaves[i].size = size; + bridge->slaves[i].buf_base = fake_pci_to_ptr(buf_base); + bridge->slaves[i].aspace = aspace; + bridge->slaves[i].cycle = cycle; + + mutex_unlock(&image->mtx); + + return 0; +} + +/* + * Get slave window configuration. + */ +static int fake_slave_get(struct vme_slave_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *buf_base, u32 *aspace, u32 *cycle) +{ + unsigned int i; + struct fake_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + mutex_lock(&image->mtx); + + *enabled = bridge->slaves[i].enabled; + *vme_base = bridge->slaves[i].vme_base; + *size = bridge->slaves[i].size; + *buf_base = fake_ptr_to_pci(bridge->slaves[i].buf_base); + *aspace = bridge->slaves[i].aspace; + *cycle = bridge->slaves[i].cycle; + + mutex_unlock(&image->mtx); + + return 0; +} + +/* + * Set the attributes of an outbound window. 
+ */ +static int fake_master_set(struct vme_master_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, + u32 aspace, u32 cycle, u32 dwidth) +{ + int retval = 0; + unsigned int i; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = image->parent; + + bridge = fake_bridge->driver_priv; + + /* Verify input data */ + if (vme_base & 0xFFFF) { + pr_err("Invalid VME Window alignment\n"); + retval = -EINVAL; + goto err_window; + } + + if (size & 0xFFFF) { + pr_err("Invalid size alignment\n"); + retval = -EINVAL; + goto err_window; + } + + if ((size == 0) && (enabled != 0)) { + pr_err("Size must be non-zero for enabled windows\n"); + retval = -EINVAL; + goto err_window; + } + + /* Setup data width */ + switch (dwidth) { + case VME_D8: + case VME_D16: + case VME_D32: + break; + default: + pr_err("Invalid data width\n"); + retval = -EINVAL; + goto err_dwidth; + } + + /* Setup address space */ + switch (aspace) { + case VME_A16: + case VME_A24: + case VME_A32: + case VME_A64: + case VME_CRCSR: + case VME_USER1: + case VME_USER2: + case VME_USER3: + case VME_USER4: + break; + default: + pr_err("Invalid address space\n"); + retval = -EINVAL; + goto err_aspace; + } + + spin_lock(&image->lock); + + i = image->number; + + bridge->masters[i].enabled = enabled; + bridge->masters[i].vme_base = vme_base; + bridge->masters[i].size = size; + bridge->masters[i].aspace = aspace; + bridge->masters[i].cycle = cycle; + bridge->masters[i].dwidth = dwidth; + + spin_unlock(&image->lock); + + return 0; + +err_aspace: +err_dwidth: +err_window: + return retval; + +} + +/* + * Set the attributes of an outbound window. + */ +static int __fake_master_get(struct vme_master_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + u32 *aspace, u32 *cycle, u32 *dwidth) +{ + unsigned int i; + struct fake_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + *enabled = bridge->masters[i].enabled; + *vme_base = bridge->masters[i].vme_base; + *size = bridge->masters[i].size; + *aspace = bridge->masters[i].aspace; + *cycle = bridge->masters[i].cycle; + *dwidth = bridge->masters[i].dwidth; + + return 0; +} + + +static int fake_master_get(struct vme_master_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + u32 *aspace, u32 *cycle, u32 *dwidth) +{ + int retval; + + spin_lock(&image->lock); + + retval = __fake_master_get(image, enabled, vme_base, size, aspace, + cycle, dwidth); + + spin_unlock(&image->lock); + + return retval; +} + + +static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr, + u32 aspace, u32 cycle) +{ + struct vme_bridge *fake_bridge; + unsigned long long lm_base; + u32 lm_aspace, lm_cycle; + int i; + struct vme_lm_resource *lm; + struct list_head *pos = NULL, *n; + + /* Get vme_bridge */ + fake_bridge = bridge->parent; + + /* Loop through each location monitor resource */ + list_for_each_safe(pos, n, &fake_bridge->lm_resources) { + lm = list_entry(pos, struct vme_lm_resource, list); + + /* If disabled, we're done */ + if (bridge->lm_enabled == 0) + return; + + lm_base = bridge->lm_base; + lm_aspace = bridge->lm_aspace; + lm_cycle = bridge->lm_cycle; + + /* First make sure that the cycle and address space match */ + if ((lm_aspace == aspace) && (lm_cycle == cycle)) { + for (i = 0; i < lm->monitors; i++) { + /* Each location monitor covers 8 bytes */ + if (((lm_base + (8 * i)) <= addr) && + ((lm_base + (8 * i) + 8) > addr)) { + if 
(bridge->lm_callback[i]) + bridge->lm_callback[i]( + bridge->lm_data[i]); + } + } + } + } +} + +static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u8 retval = 0xff; + int i; + unsigned long long start, end, offset; + u8 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + if ((addr >= start) && (addr < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u8 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u16 retval = 0xffff; + int i; + unsigned long long start, end, offset; + u16 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 1) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u16 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) +{ + u32 retval = 0xffffffff; + int i; + unsigned long long start, end, offset; + u32 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 3) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u32 *)(bridge->slaves[i].buf_base + offset); + retval = *loc; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + + return retval; +} + +static ssize_t fake_master_read(struct vme_master_resource *image, void *buf, + size_t count, loff_t offset) +{ + int retval; + u32 aspace, cycle, dwidth; + struct vme_bridge *fake_bridge; + struct fake_driver *priv; + int i; + unsigned long long addr; + unsigned int done = 0; + unsigned int count32; + + fake_bridge = image->parent; + + priv = fake_bridge->driver_priv; + + i = image->number; + + addr = (unsigned long long)priv->masters[i].vme_base + offset; + aspace = priv->masters[i].aspace; + cycle = priv->masters[i].cycle; + dwidth = priv->masters[i].dwidth; + + spin_lock(&image->lock); + + /* The following code handles VME address alignment. We cannot use + * memcpy_xxx here because it may cut data transfers in to 8-bit + * cycles when D16 or D32 cycles are required on the VME bus. + * On the other hand, the bridge itself assures that the maximum data + * cycle configured for the transfer is used and splits it + * automatically for non-aligned addresses, so we don't want the + * overhead of needlessly forcing small transfers for the entire cycle. 
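+ *
+ * Worked illustration (values assumed, not part of the original comment):
+ * with a D32 window, an address ending in ...1 and count = 10, the code
+ * below issues
+ *   1 x 8-bit  read  (done = 1, address now 16-bit aligned)
+ *   1 x 16-bit read  (done = 3, address now 32-bit aligned)
+ *   1 x 32-bit read  (done = 7)
+ *   1 x 16-bit read  (done = 9)
+ *   1 x 8-bit  read  (done = 10 == count)
+ * i.e. the widest cycles the alignment allows, never exceeding count.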
+ */ + if (addr & 0x1) { + *(u8 *)buf = fake_vmeread8(priv, addr, aspace, cycle); + done += 1; + if (done == count) + goto out; + } + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((addr + done) & 0x2) { + if ((count - done) < 2) { + *(u8 *)(buf + done) = fake_vmeread8(priv, + addr + done, aspace, cycle); + done += 1; + goto out; + } else { + *(u16 *)(buf + done) = fake_vmeread16(priv, + addr + done, aspace, cycle); + done += 2; + } + } + } + + if (dwidth == VME_D32) { + count32 = (count - done) & ~0x3; + while (done < count32) { + *(u32 *)(buf + done) = fake_vmeread32(priv, addr + done, + aspace, cycle); + done += 4; + } + } else if (dwidth == VME_D16) { + count32 = (count - done) & ~0x3; + while (done < count32) { + *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done, + aspace, cycle); + done += 2; + } + } else if (dwidth == VME_D8) { + count32 = (count - done); + while (done < count32) { + *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, + aspace, cycle); + done += 1; + } + + } + + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((count - done) & 0x2) { + *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done, + aspace, cycle); + done += 2; + } + } + if ((count - done) & 0x1) { + *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, aspace, + cycle); + done += 1; + } + +out: + retval = count; + + spin_unlock(&image->lock); + + return retval; +} + +static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge, + u8 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u8 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && (addr < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u8 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge, + u16 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u16 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 1) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u16 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge, + u32 *buf, unsigned long long addr, + u32 aspace, u32 cycle) +{ + int i; + unsigned long long start, end, offset; + u32 *loc; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + if (aspace != bridge->slaves[i].aspace) + continue; + + if (cycle != bridge->slaves[i].cycle) + continue; + + start = bridge->slaves[i].vme_base; + end = bridge->slaves[i].vme_base + bridge->slaves[i].size; + + if ((addr >= start) && ((addr + 3) < end)) { + offset = addr - bridge->slaves[i].vme_base; + loc = (u32 *)((void *)bridge->slaves[i].buf_base + offset); + *loc = *buf; + + break; + } + } + + fake_lm_check(bridge, addr, aspace, cycle); + +} + +static ssize_t fake_master_write(struct vme_master_resource 
*image, void *buf, + size_t count, loff_t offset) +{ + int retval = 0; + u32 aspace, cycle, dwidth; + unsigned long long addr; + int i; + unsigned int done = 0; + unsigned int count32; + + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = image->parent; + + bridge = fake_bridge->driver_priv; + + i = image->number; + + addr = bridge->masters[i].vme_base + offset; + aspace = bridge->masters[i].aspace; + cycle = bridge->masters[i].cycle; + dwidth = bridge->masters[i].dwidth; + + spin_lock(&image->lock); + + /* Here we apply for the same strategy we do in master_read + * function in order to assure the correct cycles. + */ + if (addr & 0x1) { + fake_vmewrite8(bridge, (u8 *)buf, addr, aspace, cycle); + done += 1; + if (done == count) + goto out; + } + + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((addr + done) & 0x2) { + if ((count - done) < 2) { + fake_vmewrite8(bridge, (u8 *)(buf + done), + addr + done, aspace, cycle); + done += 1; + goto out; + } else { + fake_vmewrite16(bridge, (u16 *)(buf + done), + addr + done, aspace, cycle); + done += 2; + } + } + } + + if (dwidth == VME_D32) { + count32 = (count - done) & ~0x3; + while (done < count32) { + fake_vmewrite32(bridge, (u32 *)(buf + done), + addr + done, aspace, cycle); + done += 4; + } + } else if (dwidth == VME_D16) { + count32 = (count - done) & ~0x3; + while (done < count32) { + fake_vmewrite16(bridge, (u16 *)(buf + done), + addr + done, aspace, cycle); + done += 2; + } + } else if (dwidth == VME_D8) { + count32 = (count - done); + while (done < count32) { + fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, + aspace, cycle); + done += 1; + } + + } + + if ((dwidth == VME_D16) || (dwidth == VME_D32)) { + if ((count - done) & 0x2) { + fake_vmewrite16(bridge, (u16 *)(buf + done), + addr + done, aspace, cycle); + done += 2; + } + } + + if ((count - done) & 0x1) { + fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, aspace, + cycle); + done += 1; + } + +out: + retval = count; + + spin_unlock(&image->lock); + + return retval; +} + +/* + * Perform an RMW cycle on the VME bus. + * + * Requires a previously configured master window, returns final value. + */ +static unsigned int fake_master_rmw(struct vme_master_resource *image, + unsigned int mask, unsigned int compare, unsigned int swap, + loff_t offset) +{ + u32 tmp, base; + u32 aspace, cycle; + int i; + struct fake_driver *bridge; + + bridge = image->parent->driver_priv; + + /* Find the PCI address that maps to the desired VME address */ + i = image->number; + + base = bridge->masters[i].vme_base; + aspace = bridge->masters[i].aspace; + cycle = bridge->masters[i].cycle; + + /* Lock image */ + spin_lock(&image->lock); + + /* Read existing value */ + tmp = fake_vmeread32(bridge, base + offset, aspace, cycle); + + /* Perform check */ + if ((tmp && mask) == (compare && mask)) { + tmp = tmp | (mask | swap); + tmp = tmp & (~mask | swap); + + /* Write back */ + fake_vmewrite32(bridge, &tmp, base + offset, aspace, cycle); + } + + /* Unlock image */ + spin_unlock(&image->lock); + + return tmp; +} + +/* + * All 4 location monitors reside at the same base - this is therefore a + * system wide configuration. + * + * This does not enable the LM monitor - that should be done when the first + * callback is attached and disabled when the last callback is removed. 
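+ *
+ * Illustrative usage sketch (identifiers my_callback/my_data assumed,
+ * not part of the original driver):
+ *
+ *   fake_lm_set(lm, 0x1000, VME_A16, VME_SCT | VME_DATA);
+ *   fake_lm_attach(lm, 0, my_callback, my_data);
+ *
+ * After this, a fake_vmereadN()/fake_vmewriteN() whose address falls in
+ * monitor 0's 8-byte range [0x1000, 0x1008), with aspace and cycle equal
+ * to the values set above, makes fake_lm_check() call my_callback(my_data).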
+ */ +static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, + u32 aspace, u32 cycle) +{ + int i; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = lm->parent; + + bridge = fake_bridge->driver_priv; + + mutex_lock(&lm->mtx); + + /* If we already have a callback attached, we can't move it! */ + for (i = 0; i < lm->monitors; i++) { + if (bridge->lm_callback[i]) { + mutex_unlock(&lm->mtx); + pr_err("Location monitor callback attached, can't reset\n"); + return -EBUSY; + } + } + + switch (aspace) { + case VME_A16: + case VME_A24: + case VME_A32: + case VME_A64: + break; + default: + mutex_unlock(&lm->mtx); + pr_err("Invalid address space\n"); + return -EINVAL; + } + + bridge->lm_base = lm_base; + bridge->lm_aspace = aspace; + bridge->lm_cycle = cycle; + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* Get configuration of the callback monitor and return whether it is enabled + * or disabled. + */ +static int fake_lm_get(struct vme_lm_resource *lm, + unsigned long long *lm_base, u32 *aspace, u32 *cycle) +{ + struct fake_driver *bridge; + + bridge = lm->parent->driver_priv; + + mutex_lock(&lm->mtx); + + *lm_base = bridge->lm_base; + *aspace = bridge->lm_aspace; + *cycle = bridge->lm_cycle; + + mutex_unlock(&lm->mtx); + + return bridge->lm_enabled; +} + +/* + * Attach a callback to a specific location monitor. + * + * Callback will be passed the monitor triggered. + */ +static int fake_lm_attach(struct vme_lm_resource *lm, int monitor, + void (*callback)(void *), void *data) +{ + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = lm->parent; + + bridge = fake_bridge->driver_priv; + + mutex_lock(&lm->mtx); + + /* Ensure that the location monitor is configured - need PGM or DATA */ + if (bridge->lm_cycle == 0) { + mutex_unlock(&lm->mtx); + pr_err("Location monitor not properly configured\n"); + return -EINVAL; + } + + /* Check that a callback isn't already attached */ + if (bridge->lm_callback[monitor]) { + mutex_unlock(&lm->mtx); + pr_err("Existing callback attached\n"); + return -EBUSY; + } + + /* Attach callback */ + bridge->lm_callback[monitor] = callback; + bridge->lm_data[monitor] = data; + + /* Ensure that global Location Monitor Enable set */ + bridge->lm_enabled = 1; + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* + * Detach a callback function forn a specific location monitor. 
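+ *
+ * For example, detaching monitor 2 while monitor 0 still has a callback
+ * attached leaves the global lm_enabled flag set; detaching monitor 0 as
+ * well then clears it, since the flag is only dropped once no callbacks
+ * remain.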
+ */ +static int fake_lm_detach(struct vme_lm_resource *lm, int monitor) +{ + u32 tmp; + int i; + struct fake_driver *bridge; + + bridge = lm->parent->driver_priv; + + mutex_lock(&lm->mtx); + + /* Detach callback */ + bridge->lm_callback[monitor] = NULL; + bridge->lm_data[monitor] = NULL; + + /* If all location monitors disabled, disable global Location Monitor */ + tmp = 0; + for (i = 0; i < lm->monitors; i++) { + if (bridge->lm_callback[i]) + tmp = 1; + } + + if (tmp == 0) + bridge->lm_enabled = 0; + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* + * Determine Geographical Addressing + */ +static int fake_slot_get(struct vme_bridge *fake_bridge) +{ + return geoid; +} + +static void *fake_alloc_consistent(struct device *parent, size_t size, + dma_addr_t *dma) +{ + void *alloc = kmalloc(size, GFP_KERNEL); + + if (alloc) + *dma = fake_ptr_to_pci(alloc); + + return alloc; +} + +static void fake_free_consistent(struct device *parent, size_t size, + void *vaddr, dma_addr_t dma) +{ + kfree(vaddr); +/* + dma_free_coherent(parent, size, vaddr, dma); +*/ +} + +/* + * Configure CR/CSR space + * + * Access to the CR/CSR can be configured at power-up. The location of the + * CR/CSR registers in the CR/CSR address space is determined by the boards + * Geographic address. + * + * Each board has a 512kB window, with the highest 4kB being used for the + * boards registers, this means there is a fix length 508kB window which must + * be mapped onto PCI memory. + */ +static int fake_crcsr_init(struct vme_bridge *fake_bridge) +{ + u32 vstat; + struct fake_driver *bridge; + + bridge = fake_bridge->driver_priv; + + /* Allocate mem for CR/CSR image */ + bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL); + bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel); + if (!bridge->crcsr_kernel) + return -ENOMEM; + + vstat = fake_slot_get(fake_bridge); + + pr_info("CR/CSR Offset: %d\n", vstat); + + return 0; +} + +static void fake_crcsr_exit(struct vme_bridge *fake_bridge) +{ + struct fake_driver *bridge; + + bridge = fake_bridge->driver_priv; + + kfree(bridge->crcsr_kernel); +} + + +static int __init fake_init(void) +{ + int retval, i; + struct list_head *pos = NULL, *n; + struct vme_bridge *fake_bridge; + struct fake_driver *fake_device; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + struct vme_lm_resource *lm; + + /* We need a fake parent device */ + vme_root = __root_device_register("vme", THIS_MODULE); + + /* If we want to support more than one bridge at some point, we need to + * dynamically allocate this so we get one per device. 
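+ * As it stands, a single exit_pointer variable is used by fake_exit() to
+ * find the bridge again, so only one fake bridge instance exists per
+ * module load.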
+ */ + fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL); + if (!fake_bridge) { + retval = -ENOMEM; + goto err_struct; + } + + fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL); + if (!fake_device) { + retval = -ENOMEM; + goto err_driver; + } + + fake_bridge->driver_priv = fake_device; + + fake_bridge->parent = vme_root; + + fake_device->parent = fake_bridge; + + /* Initialize wait queues & mutual exclusion flags */ + mutex_init(&fake_device->vme_int); + mutex_init(&fake_bridge->irq_mtx); + tasklet_init(&fake_device->int_tasklet, fake_VIRQ_tasklet, + (unsigned long) fake_bridge); + + strcpy(fake_bridge->name, driver_name); + + /* Add master windows to list */ + INIT_LIST_HEAD(&fake_bridge->master_resources); + for (i = 0; i < FAKE_MAX_MASTER; i++) { + master_image = kmalloc(sizeof(*master_image), GFP_KERNEL); + if (!master_image) { + retval = -ENOMEM; + goto err_master; + } + master_image->parent = fake_bridge; + spin_lock_init(&master_image->lock); + master_image->locked = 0; + master_image->number = i; + master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64; + master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + master_image->width_attr = VME_D16 | VME_D32; + memset(&master_image->bus_resource, 0, + sizeof(struct resource)); + master_image->kern_base = NULL; + list_add_tail(&master_image->list, + &fake_bridge->master_resources); + } + + /* Add slave windows to list */ + INIT_LIST_HEAD(&fake_bridge->slave_resources); + for (i = 0; i < FAKE_MAX_SLAVE; i++) { + slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL); + if (!slave_image) { + retval = -ENOMEM; + goto err_slave; + } + slave_image->parent = fake_bridge; + mutex_init(&slave_image->mtx); + slave_image->locked = 0; + slave_image->number = i; + slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | + VME_USER3 | VME_USER4; + slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + list_add_tail(&slave_image->list, + &fake_bridge->slave_resources); + } + + /* Add location monitor to list */ + INIT_LIST_HEAD(&fake_bridge->lm_resources); + lm = kmalloc(sizeof(*lm), GFP_KERNEL); + if (!lm) { + retval = -ENOMEM; + goto err_lm; + } + lm->parent = fake_bridge; + mutex_init(&lm->mtx); + lm->locked = 0; + lm->number = 1; + lm->monitors = 4; + list_add_tail(&lm->list, &fake_bridge->lm_resources); + + fake_bridge->slave_get = fake_slave_get; + fake_bridge->slave_set = fake_slave_set; + fake_bridge->master_get = fake_master_get; + fake_bridge->master_set = fake_master_set; + fake_bridge->master_read = fake_master_read; + fake_bridge->master_write = fake_master_write; + fake_bridge->master_rmw = fake_master_rmw; + fake_bridge->irq_set = fake_irq_set; + fake_bridge->irq_generate = fake_irq_generate; + fake_bridge->lm_set = fake_lm_set; + fake_bridge->lm_get = fake_lm_get; + fake_bridge->lm_attach = fake_lm_attach; + fake_bridge->lm_detach = fake_lm_detach; + fake_bridge->slot_get = fake_slot_get; + fake_bridge->alloc_consistent = fake_alloc_consistent; + fake_bridge->free_consistent = fake_free_consistent; + + pr_info("Board is%s the VME system controller\n", + (geoid == 1) ? 
"" : " not"); + + pr_info("VME geographical address is set to %d\n", geoid); + + retval = fake_crcsr_init(fake_bridge); + if (retval) { + pr_err("CR/CSR configuration failed.\n"); + goto err_crcsr; + } + + retval = vme_register_bridge(fake_bridge); + if (retval != 0) { + pr_err("Chip Registration failed.\n"); + goto err_reg; + } + + exit_pointer = fake_bridge; + + return 0; + +err_reg: + fake_crcsr_exit(fake_bridge); +err_crcsr: +err_lm: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->lm_resources) { + lm = list_entry(pos, struct vme_lm_resource, list); + list_del(pos); + kfree(lm); + } +err_slave: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } +err_master: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &fake_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + kfree(fake_device); +err_driver: + kfree(fake_bridge); +err_struct: + return retval; + +} + + +static void __exit fake_exit(void) +{ + struct list_head *pos = NULL; + struct list_head *tmplist; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + int i; + struct vme_bridge *fake_bridge; + struct fake_driver *bridge; + + fake_bridge = exit_pointer; + + bridge = fake_bridge->driver_priv; + + pr_debug("Driver is being unloaded.\n"); + + /* + * Shutdown all inbound and outbound windows. + */ + for (i = 0; i < FAKE_MAX_MASTER; i++) + bridge->masters[i].enabled = 0; + + for (i = 0; i < FAKE_MAX_SLAVE; i++) + bridge->slaves[i].enabled = 0; + + /* + * Shutdown Location monitor. + */ + bridge->lm_enabled = 0; + + vme_unregister_bridge(fake_bridge); + + fake_crcsr_exit(fake_bridge); + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &fake_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &fake_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + kfree(fake_bridge->driver_priv); + + kfree(fake_bridge); + + root_device_unregister(vme_root); +} + + +MODULE_PARM_DESC(geoid, "Set geographical addressing"); +module_param(geoid, int, 0); + +MODULE_DESCRIPTION("Fake VME bridge driver"); +MODULE_LICENSE("GPL"); + +module_init(fake_init); +module_exit(fake_exit); diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c new file mode 100644 index 000000000000..956476213241 --- /dev/null +++ b/drivers/staging/vme_user/vme_tsi148.c @@ -0,0 +1,2661 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for the Tundra TSI148 VME-PCI Bridge Chip + * + * Author: Martyn Welch <martyn.welch@ge.com> + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. + * + * Based on work by Tom Armistead and Ajit Prem + * Copyright 2004 Motorola Inc. 
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/proc_fs.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <linux/byteorder/generic.h> + +#include "vme.h" +#include "vme_bridge.h" +#include "vme_tsi148.h" + +static int tsi148_probe(struct pci_dev *, const struct pci_device_id *); +static void tsi148_remove(struct pci_dev *); + + +/* Module parameter */ +static bool err_chk; +static int geoid; + +static const char driver_name[] = "vme_tsi148"; + +static const struct pci_device_id tsi148_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, tsi148_ids); + +static struct pci_driver tsi148_driver = { + .name = driver_name, + .id_table = tsi148_ids, + .probe = tsi148_probe, + .remove = tsi148_remove, +}; + +static void reg_join(unsigned int high, unsigned int low, + unsigned long long *variable) +{ + *variable = (unsigned long long)high << 32; + *variable |= (unsigned long long)low; +} + +static void reg_split(unsigned long long variable, unsigned int *high, + unsigned int *low) +{ + *low = (unsigned int)variable & 0xFFFFFFFF; + *high = (unsigned int)(variable >> 32); +} + +/* + * Wakes up DMA queue. + */ +static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge, + int channel_mask) +{ + u32 serviced = 0; + + if (channel_mask & TSI148_LCSR_INTS_DMA0S) { + wake_up(&bridge->dma_queue[0]); + serviced |= TSI148_LCSR_INTC_DMA0C; + } + if (channel_mask & TSI148_LCSR_INTS_DMA1S) { + wake_up(&bridge->dma_queue[1]); + serviced |= TSI148_LCSR_INTC_DMA1C; + } + + return serviced; +} + +/* + * Wake up location monitor queue + */ +static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat) +{ + int i; + u32 serviced = 0; + + for (i = 0; i < 4; i++) { + if (stat & TSI148_LCSR_INTS_LMS[i]) { + /* We only enable interrupts if the callback is set */ + bridge->lm_callback[i](bridge->lm_data[i]); + serviced |= TSI148_LCSR_INTC_LMC[i]; + } + } + + return serviced; +} + +/* + * Wake up mail box queue. + * + * XXX This functionality is not exposed up though API. + */ +static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat) +{ + int i; + u32 val; + u32 serviced = 0; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + for (i = 0; i < 4; i++) { + if (stat & TSI148_LCSR_INTS_MBS[i]) { + val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]); + dev_err(tsi148_bridge->parent, "VME Mailbox %d received" + ": 0x%x\n", i, val); + serviced |= TSI148_LCSR_INTC_MBC[i]; + } + } + + return serviced; +} + +/* + * Display error & status message when PERR (PCI) exception interrupt occurs. 
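+ *
+ * Like the other sub-handlers here, the return value is the set of
+ * TSI148_LCSR_INTC bits that were serviced; the top-level handler
+ * accumulates these and acknowledges them with a single write to INTC.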
+ */ +static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge) +{ + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, " + "attributes: %08x\n", + ioread32be(bridge->base + TSI148_LCSR_EDPAU), + ioread32be(bridge->base + TSI148_LCSR_EDPAL), + ioread32be(bridge->base + TSI148_LCSR_EDPAT)); + + dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split " + "completion reg: %08x\n", + ioread32be(bridge->base + TSI148_LCSR_EDPXA), + ioread32be(bridge->base + TSI148_LCSR_EDPXS)); + + iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT); + + return TSI148_LCSR_INTC_PERRC; +} + +/* + * Save address and status when VME error interrupt occurs. + */ +static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge) +{ + unsigned int error_addr_high, error_addr_low; + unsigned long long error_addr; + u32 error_attrib; + int error_am; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU); + error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL); + error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT); + error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8; + + reg_join(error_addr_high, error_addr_low, &error_addr); + + /* Check for exception register overflow (we have lost error data) */ + if (error_attrib & TSI148_LCSR_VEAT_VEOF) { + dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow " + "Occurred\n"); + } + + if (err_chk) + vme_bus_error_handler(tsi148_bridge, error_addr, error_am); + else + dev_err(tsi148_bridge->parent, + "VME Bus Error at address: 0x%llx, attributes: %08x\n", + error_addr, error_attrib); + + /* Clear Status */ + iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT); + + return TSI148_LCSR_INTC_VERRC; +} + +/* + * Wake up IACK queue. + */ +static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge) +{ + wake_up(&bridge->iack_queue); + + return TSI148_LCSR_INTC_IACKC; +} + +/* + * Calling VME bus interrupt callback if provided. + */ +static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge, + u32 stat) +{ + int vec, i, serviced = 0; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + for (i = 7; i > 0; i--) { + if (stat & (1 << i)) { + /* + * Note: Even though the registers are defined as + * 32-bits in the spec, we only want to issue 8-bit + * IACK cycles on the bus, read from offset 3. + */ + vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3); + + vme_irq_handler(tsi148_bridge, i, vec); + + serviced |= (1 << i); + } + } + + return serviced; +} + +/* + * Top level interrupt handler. Clears appropriate interrupt status bits and + * then calls appropriate sub handler(s). 
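+ *
+ * Worked illustration (status bits assumed): if INTS comes back with
+ * DMA0S and IRQ3S set and both are unmasked in INTEO, "stat &= enable"
+ * keeps both bits, the DMA and VIRQ sub-handlers run, and the bits they
+ * return are written to INTC so that exactly the handled interrupts are
+ * acknowledged.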
+ */ +static irqreturn_t tsi148_irqhandler(int irq, void *ptr) +{ + u32 stat, enable, serviced = 0; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = ptr; + + bridge = tsi148_bridge->driver_priv; + + /* Determine which interrupts are unmasked and set */ + enable = ioread32be(bridge->base + TSI148_LCSR_INTEO); + stat = ioread32be(bridge->base + TSI148_LCSR_INTS); + + /* Only look at unmasked interrupts */ + stat &= enable; + + if (unlikely(!stat)) + return IRQ_NONE; + + /* Call subhandlers as appropriate */ + /* DMA irqs */ + if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S)) + serviced |= tsi148_DMA_irqhandler(bridge, stat); + + /* Location monitor irqs */ + if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S | + TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S)) + serviced |= tsi148_LM_irqhandler(bridge, stat); + + /* Mail box irqs */ + if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S | + TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S)) + serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat); + + /* PCI bus error */ + if (stat & TSI148_LCSR_INTS_PERRS) + serviced |= tsi148_PERR_irqhandler(tsi148_bridge); + + /* VME bus error */ + if (stat & TSI148_LCSR_INTS_VERRS) + serviced |= tsi148_VERR_irqhandler(tsi148_bridge); + + /* IACK irq */ + if (stat & TSI148_LCSR_INTS_IACKS) + serviced |= tsi148_IACK_irqhandler(bridge); + + /* VME bus irqs */ + if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S | + TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S | + TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S | + TSI148_LCSR_INTS_IRQ1S)) + serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat); + + /* Clear serviced interrupts */ + iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC); + + return IRQ_HANDLED; +} + +static int tsi148_irq_init(struct vme_bridge *tsi148_bridge) +{ + int result; + unsigned int tmp; + struct pci_dev *pdev; + struct tsi148_driver *bridge; + + pdev = to_pci_dev(tsi148_bridge->parent); + + bridge = tsi148_bridge->driver_priv; + + result = request_irq(pdev->irq, + tsi148_irqhandler, + IRQF_SHARED, + driver_name, tsi148_bridge); + if (result) { + dev_err(tsi148_bridge->parent, "Can't get assigned pci irq " + "vector %02X\n", pdev->irq); + return result; + } + + /* Enable and unmask interrupts */ + tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO | + TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO | + TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO | + TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO | + TSI148_LCSR_INTEO_IACKEO; + + /* This leaves the following interrupts masked. + * TSI148_LCSR_INTEO_VIEEO + * TSI148_LCSR_INTEO_SYSFLEO + * TSI148_LCSR_INTEO_ACFLEO + */ + + /* Don't enable Location Monitor interrupts here - they will be + * enabled when the location monitors are properly configured and + * a callback has been attached. + * TSI148_LCSR_INTEO_LM0EO + * TSI148_LCSR_INTEO_LM1EO + * TSI148_LCSR_INTEO_LM2EO + * TSI148_LCSR_INTEO_LM3EO + */ + + /* Don't enable VME interrupts until we add a handler, else the board + * will respond to it and we don't want that unless it knows how to + * properly deal with it. 
+ * TSI148_LCSR_INTEO_IRQ7EO + * TSI148_LCSR_INTEO_IRQ6EO + * TSI148_LCSR_INTEO_IRQ5EO + * TSI148_LCSR_INTEO_IRQ4EO + * TSI148_LCSR_INTEO_IRQ3EO + * TSI148_LCSR_INTEO_IRQ2EO + * TSI148_LCSR_INTEO_IRQ1EO + */ + + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + + return 0; +} + +static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + struct tsi148_driver *bridge = tsi148_bridge->driver_priv; + + /* Turn off interrupts */ + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO); + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN); + + /* Clear all interrupts */ + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC); + + /* Detach interrupt handler */ + free_irq(pdev->irq, tsi148_bridge); +} + +/* + * Check to see if an IACk has been received, return true (1) or false (0). + */ +static int tsi148_iack_received(struct tsi148_driver *bridge) +{ + u32 tmp; + + tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); + + if (tmp & TSI148_LCSR_VICR_IRQS) + return 0; + else + return 1; +} + +/* + * Configure VME interrupt + */ +static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level, + int state, int sync) +{ + struct pci_dev *pdev; + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* We need to do the ordering differently for enabling and disabling */ + if (state == 0) { + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); + tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + if (sync != 0) { + pdev = to_pci_dev(tsi148_bridge->parent); + synchronize_irq(pdev->irq); + } + } else { + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); + tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + } +} + +/* + * Generate a VME bus interrupt at the requested level & vector. Wait for + * interrupt to be acked. + */ +static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, + int statid) +{ + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&bridge->vme_int); + + /* Read VICR register */ + tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); + + /* Set Status/ID */ + tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) | + (statid & TSI148_LCSR_VICR_STID_M); + iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); + + /* Assert VMEbus IRQ */ + tmp = tmp | TSI148_LCSR_VICR_IRQL[level]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); + + /* XXX Consider implementing a timeout? */ + wait_event_interruptible(bridge->iack_queue, + tsi148_iack_received(bridge)); + + mutex_unlock(&bridge->vme_int); + + return 0; +} + +/* + * Initialize a slave window with the requested attributes. 
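+ *
+ * Worked example (values assumed, not part of the original driver): for
+ * an A24 window (granularity 0x1000) with vme_base = 0x200000,
+ * size = 0x100000 and pci_base = 0x80000000,
+ *
+ *   vme_bound  = 0x200000 + 0x100000 - 0x1000 = 0x2ff000
+ *   pci_offset = 0x80000000 - 0x200000        = 0x7fe00000
+ *
+ * and all three low words pass the (value & (granularity - 1)) checks.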
+ */ +static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, + dma_addr_t pci_base, u32 aspace, u32 cycle) +{ + unsigned int i, addr = 0, granularity = 0; + unsigned int temp_ctl = 0; + unsigned int vme_base_low, vme_base_high; + unsigned int vme_bound_low, vme_bound_high; + unsigned int pci_offset_low, pci_offset_high; + unsigned long long vme_bound, pci_offset; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = image->parent; + bridge = tsi148_bridge->driver_priv; + + i = image->number; + + switch (aspace) { + case VME_A16: + granularity = 0x10; + addr |= TSI148_LCSR_ITAT_AS_A16; + break; + case VME_A24: + granularity = 0x1000; + addr |= TSI148_LCSR_ITAT_AS_A24; + break; + case VME_A32: + granularity = 0x10000; + addr |= TSI148_LCSR_ITAT_AS_A32; + break; + case VME_A64: + granularity = 0x10000; + addr |= TSI148_LCSR_ITAT_AS_A64; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid address space\n"); + return -EINVAL; + } + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_split(vme_base, &vme_base_high, &vme_base_low); + + /* + * Bound address is a valid address for the window, adjust + * accordingly + */ + vme_bound = vme_base + size - granularity; + reg_split(vme_bound, &vme_bound_high, &vme_bound_low); + pci_offset = (unsigned long long)pci_base - vme_base; + reg_split(pci_offset, &pci_offset_high, &pci_offset_low); + + if (vme_base_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n"); + return -EINVAL; + } + if (vme_bound_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n"); + return -EINVAL; + } + if (pci_offset_low & (granularity - 1)) { + dev_err(tsi148_bridge->parent, "Invalid PCI Offset " + "alignment\n"); + return -EINVAL; + } + + /* Disable while we are mucking around */ + temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + temp_ctl &= ~TSI148_LCSR_ITAT_EN; + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + /* Setup mapping */ + iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAU); + iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAL); + iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAU); + iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAL); + iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFU); + iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFL); + + /* Setup 2eSST speeds */ + temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M; + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160; + break; + case VME_2eSST267: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267; + break; + case VME_2eSST320: + temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + temp_ctl &= ~(0x1F << 7); + if (cycle & VME_BLT) + temp_ctl |= TSI148_LCSR_ITAT_BLT; + if (cycle & VME_MBLT) + temp_ctl |= TSI148_LCSR_ITAT_MBLT; + if (cycle & VME_2eVME) + temp_ctl |= TSI148_LCSR_ITAT_2eVME; + if (cycle & VME_2eSST) + temp_ctl |= TSI148_LCSR_ITAT_2eSST; + if (cycle & VME_2eSSTB) + temp_ctl |= TSI148_LCSR_ITAT_2eSSTB; + + /* Setup address space */ + temp_ctl &= ~TSI148_LCSR_ITAT_AS_M; + temp_ctl |= addr; + + temp_ctl &= 
~0xF; + if (cycle & VME_SUPER) + temp_ctl |= TSI148_LCSR_ITAT_SUPR ; + if (cycle & VME_USER) + temp_ctl |= TSI148_LCSR_ITAT_NPRIV; + if (cycle & VME_PROG) + temp_ctl |= TSI148_LCSR_ITAT_PGM; + if (cycle & VME_DATA) + temp_ctl |= TSI148_LCSR_ITAT_DATA; + + /* Write ctl reg without enable */ + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + if (enabled) + temp_ctl |= TSI148_LCSR_ITAT_EN; + + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + return 0; +} + +/* + * Get slave window configuration. + */ +static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *pci_base, u32 *aspace, u32 *cycle) +{ + unsigned int i, granularity = 0, ctl = 0; + unsigned int vme_base_low, vme_base_high; + unsigned int vme_bound_low, vme_bound_high; + unsigned int pci_offset_low, pci_offset_high; + unsigned long long vme_bound, pci_offset; + struct tsi148_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + /* Read registers */ + ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + + vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAU); + vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITSAL); + vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAU); + vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITEAL); + pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFU); + pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITOFL); + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_join(vme_base_high, vme_base_low, vme_base); + reg_join(vme_bound_high, vme_bound_low, &vme_bound); + reg_join(pci_offset_high, pci_offset_low, &pci_offset); + + *pci_base = (dma_addr_t)(*vme_base + pci_offset); + + *enabled = 0; + *aspace = 0; + *cycle = 0; + + if (ctl & TSI148_LCSR_ITAT_EN) + *enabled = 1; + + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) { + granularity = 0x10; + *aspace |= VME_A16; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) { + granularity = 0x1000; + *aspace |= VME_A24; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) { + granularity = 0x10000; + *aspace |= VME_A32; + } + if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) { + granularity = 0x10000; + *aspace |= VME_A64; + } + + /* Need granularity before we set the size */ + *size = (unsigned long long)((vme_bound - *vme_base) + granularity); + + + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160) + *cycle |= VME_2eSST160; + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267) + *cycle |= VME_2eSST267; + if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320) + *cycle |= VME_2eSST320; + + if (ctl & TSI148_LCSR_ITAT_BLT) + *cycle |= VME_BLT; + if (ctl & TSI148_LCSR_ITAT_MBLT) + *cycle |= VME_MBLT; + if (ctl & TSI148_LCSR_ITAT_2eVME) + *cycle |= VME_2eVME; + if (ctl & TSI148_LCSR_ITAT_2eSST) + *cycle |= VME_2eSST; + if (ctl & TSI148_LCSR_ITAT_2eSSTB) + *cycle |= VME_2eSSTB; + + if (ctl & TSI148_LCSR_ITAT_SUPR) + *cycle |= VME_SUPER; + if (ctl & TSI148_LCSR_ITAT_NPRIV) + *cycle |= VME_USER; + if (ctl & TSI148_LCSR_ITAT_PGM) + *cycle |= VME_PROG; + if (ctl & TSI148_LCSR_ITAT_DATA) + *cycle |= VME_DATA; + + 
return 0; +} + +/* + * Allocate and map PCI Resource + */ +static int tsi148_alloc_resource(struct vme_master_resource *image, + unsigned long long size) +{ + unsigned long long existing_size; + int retval = 0; + struct pci_dev *pdev; + struct vme_bridge *tsi148_bridge; + + tsi148_bridge = image->parent; + + pdev = to_pci_dev(tsi148_bridge->parent); + + existing_size = (unsigned long long)(image->bus_resource.end - + image->bus_resource.start); + + /* If the existing size is OK, return */ + if ((size != 0) && (existing_size == (size - 1))) + return 0; + + if (existing_size != 0) { + iounmap(image->kern_base); + image->kern_base = NULL; + kfree(image->bus_resource.name); + release_resource(&image->bus_resource); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); + } + + /* Exit here if size is zero */ + if (size == 0) + return 0; + + if (!image->bus_resource.name) { + image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC); + if (!image->bus_resource.name) { + retval = -ENOMEM; + goto err_name; + } + } + + sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name, + image->number); + + image->bus_resource.start = 0; + image->bus_resource.end = (unsigned long)size; + image->bus_resource.flags = IORESOURCE_MEM; + + retval = pci_bus_alloc_resource(pdev->bus, + &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM, + 0, NULL, NULL); + if (retval) { + dev_err(tsi148_bridge->parent, "Failed to allocate mem " + "resource for window %d size 0x%lx start 0x%lx\n", + image->number, (unsigned long)size, + (unsigned long)image->bus_resource.start); + goto err_resource; + } + + image->kern_base = ioremap( + image->bus_resource.start, size); + if (!image->kern_base) { + dev_err(tsi148_bridge->parent, "Failed to remap resource\n"); + retval = -ENOMEM; + goto err_remap; + } + + return 0; + +err_remap: + release_resource(&image->bus_resource); +err_resource: + kfree(image->bus_resource.name); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); +err_name: + return retval; +} + +/* + * Free and unmap PCI Resource + */ +static void tsi148_free_resource(struct vme_master_resource *image) +{ + iounmap(image->kern_base); + image->kern_base = NULL; + release_resource(&image->bus_resource); + kfree(image->bus_resource.name); + memset(&image->bus_resource, 0, sizeof(image->bus_resource)); +} + +/* + * Set the attributes of an outbound window. 
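+ *
+ * Sketch of the arithmetic below (names from the function, PCI address
+ * assumed): outbound windows use a fixed 64 KB granularity, so with
+ * size = 0x20000 and pci_bus_alloc_resource() placing the window at
+ * region.start = 0xc0000000,
+ *
+ *   pci_base   = 0xc0000000
+ *   pci_bound  = pci_base + (size - 0x10000) = 0xc0010000
+ *   vme_offset = vme_base - pci_base (a 64-bit two's-complement value
+ *                whose low word must also be 64 KB aligned)
+ *
+ * so the alignment checks on the split 32-bit halves all pass.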
+ */ +static int tsi148_master_set(struct vme_master_resource *image, int enabled, + unsigned long long vme_base, unsigned long long size, u32 aspace, + u32 cycle, u32 dwidth) +{ + int retval = 0; + unsigned int i; + unsigned int temp_ctl = 0; + unsigned int pci_base_low, pci_base_high; + unsigned int pci_bound_low, pci_bound_high; + unsigned int vme_offset_low, vme_offset_high; + unsigned long long pci_bound, vme_offset, pci_base; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + struct pci_bus_region region; + struct pci_dev *pdev; + + tsi148_bridge = image->parent; + + bridge = tsi148_bridge->driver_priv; + + pdev = to_pci_dev(tsi148_bridge->parent); + + /* Verify input data */ + if (vme_base & 0xFFFF) { + dev_err(tsi148_bridge->parent, "Invalid VME Window " + "alignment\n"); + retval = -EINVAL; + goto err_window; + } + + if ((size == 0) && (enabled != 0)) { + dev_err(tsi148_bridge->parent, "Size must be non-zero for " + "enabled windows\n"); + retval = -EINVAL; + goto err_window; + } + + spin_lock(&image->lock); + + /* Let's allocate the resource here rather than further up the stack as + * it avoids pushing loads of bus dependent stuff up the stack. If size + * is zero, any existing resource will be freed. + */ + retval = tsi148_alloc_resource(image, size); + if (retval) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Unable to allocate memory for " + "resource\n"); + goto err_res; + } + + if (size == 0) { + pci_base = 0; + pci_bound = 0; + vme_offset = 0; + } else { + pcibios_resource_to_bus(pdev->bus, ®ion, + &image->bus_resource); + pci_base = region.start; + + /* + * Bound address is a valid address for the window, adjust + * according to window granularity. + */ + pci_bound = pci_base + (size - 0x10000); + vme_offset = vme_base - pci_base; + } + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_split(pci_base, &pci_base_high, &pci_base_low); + reg_split(pci_bound, &pci_bound_high, &pci_bound_low); + reg_split(vme_offset, &vme_offset_high, &vme_offset_low); + + if (pci_base_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n"); + retval = -EINVAL; + goto err_gran; + } + if (pci_bound_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n"); + retval = -EINVAL; + goto err_gran; + } + if (vme_offset_low & 0xFFFF) { + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid VME Offset " + "alignment\n"); + retval = -EINVAL; + goto err_gran; + } + + i = image->number; + + /* Disable while we are mucking around */ + temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + temp_ctl &= ~TSI148_LCSR_OTAT_EN; + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + + /* Setup 2eSST speeds */ + temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M; + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160; + break; + case VME_2eSST267: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267; + break; + case VME_2eSST320: + temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_BLT) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_BLT; + } + if (cycle & VME_MBLT) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT; + } + if (cycle & VME_2eVME) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME; + } 
+ if (cycle & VME_2eSST) { + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST; + } + if (cycle & VME_2eSSTB) { + dev_warn(tsi148_bridge->parent, "Currently not setting " + "Broadcast Select Registers\n"); + temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; + temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB; + } + + /* Setup data width */ + temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M; + switch (dwidth) { + case VME_D16: + temp_ctl |= TSI148_LCSR_OTAT_DBW_16; + break; + case VME_D32: + temp_ctl |= TSI148_LCSR_OTAT_DBW_32; + break; + default: + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid data width\n"); + retval = -EINVAL; + goto err_dwidth; + } + + /* Setup address space */ + temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M; + switch (aspace) { + case VME_A16: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16; + break; + case VME_A24: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24; + break; + case VME_A32: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32; + break; + case VME_A64: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64; + break; + case VME_CRCSR: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR; + break; + case VME_USER1: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1; + break; + case VME_USER2: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2; + break; + case VME_USER3: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3; + break; + case VME_USER4: + temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4; + break; + default: + spin_unlock(&image->lock); + dev_err(tsi148_bridge->parent, "Invalid address space\n"); + retval = -EINVAL; + goto err_aspace; + } + + temp_ctl &= ~(3<<4); + if (cycle & VME_SUPER) + temp_ctl |= TSI148_LCSR_OTAT_SUP; + if (cycle & VME_PROG) + temp_ctl |= TSI148_LCSR_OTAT_PGM; + + /* Setup mapping */ + iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAU); + iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAL); + iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTEAU); + iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTEAL); + iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTOFU); + iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTOFL); + + /* Write ctl reg without enable */ + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + + if (enabled) + temp_ctl |= TSI148_LCSR_OTAT_EN; + + iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + + spin_unlock(&image->lock); + return 0; + +err_aspace: +err_dwidth: +err_gran: + tsi148_free_resource(image); +err_res: +err_window: + return retval; + +} + +/* + * Set the attributes of an outbound window. + * + * XXX Not parsing prefetch information. 
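+ *
+ * This is effectively the inverse of tsi148_master_set() above:
+ * vme_base = pci_base + vme_offset and size = (pci_bound - pci_base)
+ * + 0x10000, so a window programmed through tsi148_master_set() reads
+ * back with the base and size it was given.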
+ */ +static int __tsi148_master_get(struct vme_master_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, u32 *aspace, + u32 *cycle, u32 *dwidth) +{ + unsigned int i, ctl; + unsigned int pci_base_low, pci_base_high; + unsigned int pci_bound_low, pci_bound_high; + unsigned int vme_offset_low, vme_offset_high; + + unsigned long long pci_base, pci_bound, vme_offset; + struct tsi148_driver *bridge; + + bridge = image->parent->driver_priv; + + i = image->number; + + ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + + pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAU); + pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAL); + pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTEAU); + pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTEAL); + vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTOFU); + vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTOFL); + + /* Convert 64-bit variables to 2x 32-bit variables */ + reg_join(pci_base_high, pci_base_low, &pci_base); + reg_join(pci_bound_high, pci_bound_low, &pci_bound); + reg_join(vme_offset_high, vme_offset_low, &vme_offset); + + *vme_base = pci_base + vme_offset; + *size = (unsigned long long)(pci_bound - pci_base) + 0x10000; + + *enabled = 0; + *aspace = 0; + *cycle = 0; + *dwidth = 0; + + if (ctl & TSI148_LCSR_OTAT_EN) + *enabled = 1; + + /* Setup address space */ + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16) + *aspace |= VME_A16; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24) + *aspace |= VME_A24; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32) + *aspace |= VME_A32; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64) + *aspace |= VME_A64; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR) + *aspace |= VME_CRCSR; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1) + *aspace |= VME_USER1; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2) + *aspace |= VME_USER2; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3) + *aspace |= VME_USER3; + if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4) + *aspace |= VME_USER4; + + /* Setup 2eSST speeds */ + if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160) + *cycle |= VME_2eSST160; + if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267) + *cycle |= VME_2eSST267; + if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320) + *cycle |= VME_2eSST320; + + /* Setup cycle types */ + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT) + *cycle |= VME_SCT; + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT) + *cycle |= VME_BLT; + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT) + *cycle |= VME_MBLT; + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME) + *cycle |= VME_2eVME; + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST) + *cycle |= VME_2eSST; + if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB) + *cycle |= VME_2eSSTB; + + if (ctl & TSI148_LCSR_OTAT_SUP) + *cycle |= VME_SUPER; + else + *cycle |= VME_USER; + + if (ctl & TSI148_LCSR_OTAT_PGM) + *cycle |= VME_PROG; + else + *cycle |= VME_DATA; + + /* Setup data width */ + if ((ctl 
& TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16) + *dwidth = VME_D16; + if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32) + *dwidth = VME_D32; + + return 0; +} + + +static int tsi148_master_get(struct vme_master_resource *image, int *enabled, + unsigned long long *vme_base, unsigned long long *size, u32 *aspace, + u32 *cycle, u32 *dwidth) +{ + int retval; + + spin_lock(&image->lock); + + retval = __tsi148_master_get(image, enabled, vme_base, size, aspace, + cycle, dwidth); + + spin_unlock(&image->lock); + + return retval; +} + +static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, + size_t count, loff_t offset) +{ + int retval, enabled; + unsigned long long vme_base, size; + u32 aspace, cycle, dwidth; + struct vme_error_handler *handler = NULL; + struct vme_bridge *tsi148_bridge; + void __iomem *addr = image->kern_base + offset; + unsigned int done = 0; + unsigned int count32; + + tsi148_bridge = image->parent; + + spin_lock(&image->lock); + + if (err_chk) { + __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, + &cycle, &dwidth); + handler = vme_register_error_handler(tsi148_bridge, aspace, + vme_base + offset, count); + if (!handler) { + spin_unlock(&image->lock); + return -ENOMEM; + } + } + + /* The following code handles VME address alignment. We cannot use + * memcpy_xxx here because it may cut data transfers in to 8-bit + * cycles when D16 or D32 cycles are required on the VME bus. + * On the other hand, the bridge itself assures that the maximum data + * cycle configured for the transfer is used and splits it + * automatically for non-aligned addresses, so we don't want the + * overhead of needlessly forcing small transfers for the entire cycle. + */ + if ((uintptr_t)addr & 0x1) { + *(u8 *)buf = ioread8(addr); + done += 1; + if (done == count) + goto out; + } + if ((uintptr_t)(addr + done) & 0x2) { + if ((count - done) < 2) { + *(u8 *)(buf + done) = ioread8(addr + done); + done += 1; + goto out; + } else { + *(u16 *)(buf + done) = ioread16(addr + done); + done += 2; + } + } + + count32 = (count - done) & ~0x3; + while (done < count32) { + *(u32 *)(buf + done) = ioread32(addr + done); + done += 4; + } + + if ((count - done) & 0x2) { + *(u16 *)(buf + done) = ioread16(addr + done); + done += 2; + } + if ((count - done) & 0x1) { + *(u8 *)(buf + done) = ioread8(addr + done); + done += 1; + } + +out: + retval = count; + + if (err_chk) { + if (handler->num_errors) { + dev_err(image->parent->parent, + "First VME read error detected an at address 0x%llx\n", + handler->first_error); + retval = handler->first_error - (vme_base + offset); + } + vme_unregister_error_handler(handler); + } + + spin_unlock(&image->lock); + + return retval; +} + + +static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, + size_t count, loff_t offset) +{ + int retval = 0, enabled; + unsigned long long vme_base, size; + u32 aspace, cycle, dwidth; + void __iomem *addr = image->kern_base + offset; + unsigned int done = 0; + unsigned int count32; + + struct vme_error_handler *handler = NULL; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = image->parent; + + bridge = tsi148_bridge->driver_priv; + + spin_lock(&image->lock); + + if (err_chk) { + __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, + &cycle, &dwidth); + handler = vme_register_error_handler(tsi148_bridge, aspace, + vme_base + offset, count); + if (!handler) { + spin_unlock(&image->lock); + return -ENOMEM; + } + } + + /* Here we 
apply for the same strategy we do in master_read + * function in order to assure the correct cycles. + */ + if ((uintptr_t)addr & 0x1) { + iowrite8(*(u8 *)buf, addr); + done += 1; + if (done == count) + goto out; + } + if ((uintptr_t)(addr + done) & 0x2) { + if ((count - done) < 2) { + iowrite8(*(u8 *)(buf + done), addr + done); + done += 1; + goto out; + } else { + iowrite16(*(u16 *)(buf + done), addr + done); + done += 2; + } + } + + count32 = (count - done) & ~0x3; + while (done < count32) { + iowrite32(*(u32 *)(buf + done), addr + done); + done += 4; + } + + if ((count - done) & 0x2) { + iowrite16(*(u16 *)(buf + done), addr + done); + done += 2; + } + if ((count - done) & 0x1) { + iowrite8(*(u8 *)(buf + done), addr + done); + done += 1; + } + +out: + retval = count; + + /* + * Writes are posted. We need to do a read on the VME bus to flush out + * all of the writes before we check for errors. We can't guarantee + * that reading the data we have just written is safe. It is believed + * that there isn't any read, write re-ordering, so we can read any + * location in VME space, so lets read the Device ID from the tsi148's + * own registers as mapped into CR/CSR space. + * + * We check for saved errors in the written address range/space. + */ + + if (err_chk) { + ioread16(bridge->flush_image->kern_base + 0x7F000); + + if (handler->num_errors) { + dev_warn(tsi148_bridge->parent, + "First VME write error detected an at address 0x%llx\n", + handler->first_error); + retval = handler->first_error - (vme_base + offset); + } + vme_unregister_error_handler(handler); + } + + spin_unlock(&image->lock); + + return retval; +} + +/* + * Perform an RMW cycle on the VME bus. + * + * Requires a previously configured master window, returns final value. + */ +static unsigned int tsi148_master_rmw(struct vme_master_resource *image, + unsigned int mask, unsigned int compare, unsigned int swap, + loff_t offset) +{ + unsigned long long pci_addr; + unsigned int pci_addr_high, pci_addr_low; + u32 tmp, result; + int i; + struct tsi148_driver *bridge; + + bridge = image->parent->driver_priv; + + /* Find the PCI address that maps to the desired VME address */ + i = image->number; + + /* Locking as we can only do one of these at a time */ + mutex_lock(&bridge->vme_rmw); + + /* Lock image */ + spin_lock(&image->lock); + + pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAU); + pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTSAL); + + reg_join(pci_addr_high, pci_addr_low, &pci_addr); + reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low); + + /* Configure registers */ + iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN); + iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC); + iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS); + iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU); + iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL); + + /* Enable RMW */ + tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL); + tmp |= TSI148_LCSR_VMCTRL_RMWEN; + iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL); + + /* Kick process off with a read to the required address. 
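/*
 * Illustrative, stand-alone sketch (not part of this patch) of the
 * width-staging strategy used by tsi148_master_read() and
 * tsi148_master_write() above: single bytes until the bus address is
 * 2-byte aligned, one 16-bit access until it is 4-byte aligned, a 32-bit
 * bulk loop, then 16-bit and 8-bit tail accesses.  The real code issues
 * ioread*()/iowrite*() on the mapped window; this stand-in copies between
 * ordinary buffers purely to show how `count` is carved up.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t example_staged_read(void *buf, const uint8_t *addr, size_t count)
{
	size_t done = 0, end32;

	if (!count)
		return 0;

	if ((uintptr_t)addr & 0x1) {			/* as ioread8() */
		memcpy(buf, addr, 1);
		done += 1;
		if (done == count)
			return done;
	}
	if ((uintptr_t)(addr + done) & 0x2) {		/* as ioread16() */
		if ((count - done) < 2) {
			memcpy((uint8_t *)buf + done, addr + done, 1);
			return done + 1;
		}
		memcpy((uint8_t *)buf + done, addr + done, 2);
		done += 2;
	}

	end32 = done + ((count - done) & ~(size_t)0x3);	/* 32-bit bulk section */
	while (done < end32) {				/* as ioread32() */
		memcpy((uint8_t *)buf + done, addr + done, 4);
		done += 4;
	}

	if ((count - done) & 0x2) {			/* 16-bit tail */
		memcpy((uint8_t *)buf + done, addr + done, 2);
		done += 2;
	}
	if ((count - done) & 0x1) {			/* 8-bit tail */
		memcpy((uint8_t *)buf + done, addr + done, 1);
		done += 1;
	}
	return done;
}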
*/ + result = ioread32be(image->kern_base + offset); + + /* Disable RMW */ + tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL); + tmp &= ~TSI148_LCSR_VMCTRL_RMWEN; + iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL); + + spin_unlock(&image->lock); + + mutex_unlock(&bridge->vme_rmw); + + return result; +} + +static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, + u32 aspace, u32 cycle, u32 dwidth) +{ + u32 val; + + val = be32_to_cpu(*attr); + + /* Setup 2eSST speeds */ + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + val |= TSI148_LCSR_DSAT_2eSSTM_160; + break; + case VME_2eSST267: + val |= TSI148_LCSR_DSAT_2eSSTM_267; + break; + case VME_2eSST320: + val |= TSI148_LCSR_DSAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_SCT) + val |= TSI148_LCSR_DSAT_TM_SCT; + + if (cycle & VME_BLT) + val |= TSI148_LCSR_DSAT_TM_BLT; + + if (cycle & VME_MBLT) + val |= TSI148_LCSR_DSAT_TM_MBLT; + + if (cycle & VME_2eVME) + val |= TSI148_LCSR_DSAT_TM_2eVME; + + if (cycle & VME_2eSST) + val |= TSI148_LCSR_DSAT_TM_2eSST; + + if (cycle & VME_2eSSTB) { + dev_err(dev, "Currently not setting Broadcast Select " + "Registers\n"); + val |= TSI148_LCSR_DSAT_TM_2eSSTB; + } + + /* Setup data width */ + switch (dwidth) { + case VME_D16: + val |= TSI148_LCSR_DSAT_DBW_16; + break; + case VME_D32: + val |= TSI148_LCSR_DSAT_DBW_32; + break; + default: + dev_err(dev, "Invalid data width\n"); + return -EINVAL; + } + + /* Setup address space */ + switch (aspace) { + case VME_A16: + val |= TSI148_LCSR_DSAT_AMODE_A16; + break; + case VME_A24: + val |= TSI148_LCSR_DSAT_AMODE_A24; + break; + case VME_A32: + val |= TSI148_LCSR_DSAT_AMODE_A32; + break; + case VME_A64: + val |= TSI148_LCSR_DSAT_AMODE_A64; + break; + case VME_CRCSR: + val |= TSI148_LCSR_DSAT_AMODE_CRCSR; + break; + case VME_USER1: + val |= TSI148_LCSR_DSAT_AMODE_USER1; + break; + case VME_USER2: + val |= TSI148_LCSR_DSAT_AMODE_USER2; + break; + case VME_USER3: + val |= TSI148_LCSR_DSAT_AMODE_USER3; + break; + case VME_USER4: + val |= TSI148_LCSR_DSAT_AMODE_USER4; + break; + default: + dev_err(dev, "Invalid address space\n"); + return -EINVAL; + } + + if (cycle & VME_SUPER) + val |= TSI148_LCSR_DSAT_SUP; + if (cycle & VME_PROG) + val |= TSI148_LCSR_DSAT_PGM; + + *attr = cpu_to_be32(val); + + return 0; +} + +static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, + u32 aspace, u32 cycle, u32 dwidth) +{ + u32 val; + + val = be32_to_cpu(*attr); + + /* Setup 2eSST speeds */ + switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { + case VME_2eSST160: + val |= TSI148_LCSR_DDAT_2eSSTM_160; + break; + case VME_2eSST267: + val |= TSI148_LCSR_DDAT_2eSSTM_267; + break; + case VME_2eSST320: + val |= TSI148_LCSR_DDAT_2eSSTM_320; + break; + } + + /* Setup cycle types */ + if (cycle & VME_SCT) + val |= TSI148_LCSR_DDAT_TM_SCT; + + if (cycle & VME_BLT) + val |= TSI148_LCSR_DDAT_TM_BLT; + + if (cycle & VME_MBLT) + val |= TSI148_LCSR_DDAT_TM_MBLT; + + if (cycle & VME_2eVME) + val |= TSI148_LCSR_DDAT_TM_2eVME; + + if (cycle & VME_2eSST) + val |= TSI148_LCSR_DDAT_TM_2eSST; + + if (cycle & VME_2eSSTB) { + dev_err(dev, "Currently not setting Broadcast Select " + "Registers\n"); + val |= TSI148_LCSR_DDAT_TM_2eSSTB; + } + + /* Setup data width */ + switch (dwidth) { + case VME_D16: + val |= TSI148_LCSR_DDAT_DBW_16; + break; + case VME_D32: + val |= TSI148_LCSR_DDAT_DBW_32; + break; + default: + dev_err(dev, "Invalid data width\n"); + return -EINVAL; + } + + /* Setup 
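/*
 * Hedged usage sketch, not part of this patch: a driver sitting on top of
 * the VME core would reach the RMW cycle implemented above through the
 * core's vme_master_rmw() wrapper (signature assumed from vme.h).  The
 * mask/compare/swap values below sketch a simple test-and-set on bit 0 of
 * a board register; the function name and offset are invented.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include "vme.h"		/* assumed: VME core API built alongside vme_user */

/* Claim a lock bit: set bit 0 at `offset` only if it currently reads 0 */
static int example_claim_lock_bit(struct vme_resource *res, loff_t offset)
{
	unsigned int old;

	old = vme_master_rmw(res, 0x1 /* mask */, 0x0 /* compare */,
			     0x1 /* swap */, offset);

	return (old & 0x1) ? -EBUSY : 0;
}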
address space */ + switch (aspace) { + case VME_A16: + val |= TSI148_LCSR_DDAT_AMODE_A16; + break; + case VME_A24: + val |= TSI148_LCSR_DDAT_AMODE_A24; + break; + case VME_A32: + val |= TSI148_LCSR_DDAT_AMODE_A32; + break; + case VME_A64: + val |= TSI148_LCSR_DDAT_AMODE_A64; + break; + case VME_CRCSR: + val |= TSI148_LCSR_DDAT_AMODE_CRCSR; + break; + case VME_USER1: + val |= TSI148_LCSR_DDAT_AMODE_USER1; + break; + case VME_USER2: + val |= TSI148_LCSR_DDAT_AMODE_USER2; + break; + case VME_USER3: + val |= TSI148_LCSR_DDAT_AMODE_USER3; + break; + case VME_USER4: + val |= TSI148_LCSR_DDAT_AMODE_USER4; + break; + default: + dev_err(dev, "Invalid address space\n"); + return -EINVAL; + } + + if (cycle & VME_SUPER) + val |= TSI148_LCSR_DDAT_SUP; + if (cycle & VME_PROG) + val |= TSI148_LCSR_DDAT_PGM; + + *attr = cpu_to_be32(val); + + return 0; +} + +/* + * Add a link list descriptor to the list + * + * Note: DMA engine expects the DMA descriptor to be big endian. + */ +static int tsi148_dma_list_add(struct vme_dma_list *list, + struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count) +{ + struct tsi148_dma_entry *entry, *prev; + u32 address_high, address_low, val; + struct vme_dma_pattern *pattern_attr; + struct vme_dma_pci *pci_attr; + struct vme_dma_vme *vme_attr; + int retval = 0; + struct vme_bridge *tsi148_bridge; + + tsi148_bridge = list->parent->parent; + + /* Descriptor must be aligned on 64-bit boundaries */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + retval = -ENOMEM; + goto err_mem; + } + + /* Test descriptor alignment */ + if ((unsigned long)&entry->descriptor & 0x7) { + dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 " + "byte boundary as required: %p\n", + &entry->descriptor); + retval = -EINVAL; + goto err_align; + } + + /* Given we are going to fill out the structure, we probably don't + * need to zero it, but better safe than sorry for now. 
+ */ + memset(&entry->descriptor, 0, sizeof(entry->descriptor)); + + /* Fill out source part */ + switch (src->type) { + case VME_DMA_PATTERN: + pattern_attr = src->private; + + entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern); + + val = TSI148_LCSR_DSAT_TYP_PAT; + + /* Default behaviour is 32 bit pattern */ + if (pattern_attr->type & VME_DMA_PATTERN_BYTE) + val |= TSI148_LCSR_DSAT_PSZ; + + /* It seems that the default behaviour is to increment */ + if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) + val |= TSI148_LCSR_DSAT_NIN; + entry->descriptor.dsat = cpu_to_be32(val); + break; + case VME_DMA_PCI: + pci_attr = src->private; + + reg_split((unsigned long long)pci_attr->address, &address_high, + &address_low); + entry->descriptor.dsau = cpu_to_be32(address_high); + entry->descriptor.dsal = cpu_to_be32(address_low); + entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI); + break; + case VME_DMA_VME: + vme_attr = src->private; + + reg_split((unsigned long long)vme_attr->address, &address_high, + &address_low); + entry->descriptor.dsau = cpu_to_be32(address_high); + entry->descriptor.dsal = cpu_to_be32(address_low); + entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME); + + retval = tsi148_dma_set_vme_src_attributes( + tsi148_bridge->parent, &entry->descriptor.dsat, + vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); + if (retval < 0) + goto err_source; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid source type\n"); + retval = -EINVAL; + goto err_source; + } + + /* Assume last link - this will be over-written by adding another */ + entry->descriptor.dnlau = cpu_to_be32(0); + entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA); + + /* Fill out destination part */ + switch (dest->type) { + case VME_DMA_PCI: + pci_attr = dest->private; + + reg_split((unsigned long long)pci_attr->address, &address_high, + &address_low); + entry->descriptor.ddau = cpu_to_be32(address_high); + entry->descriptor.ddal = cpu_to_be32(address_low); + entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI); + break; + case VME_DMA_VME: + vme_attr = dest->private; + + reg_split((unsigned long long)vme_attr->address, &address_high, + &address_low); + entry->descriptor.ddau = cpu_to_be32(address_high); + entry->descriptor.ddal = cpu_to_be32(address_low); + entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME); + + retval = tsi148_dma_set_vme_dest_attributes( + tsi148_bridge->parent, &entry->descriptor.ddat, + vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); + if (retval < 0) + goto err_dest; + break; + default: + dev_err(tsi148_bridge->parent, "Invalid destination type\n"); + retval = -EINVAL; + goto err_dest; + } + + /* Fill out count */ + entry->descriptor.dcnt = cpu_to_be32((u32)count); + + /* Add to list */ + list_add_tail(&entry->list, &list->entries); + + entry->dma_handle = dma_map_single(tsi148_bridge->parent, + &entry->descriptor, + sizeof(entry->descriptor), + DMA_TO_DEVICE); + if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) { + dev_err(tsi148_bridge->parent, "DMA mapping error\n"); + retval = -EINVAL; + goto err_dma; + } + + /* Fill out previous descriptors "Next Address" */ + if (entry->list.prev != &list->entries) { + reg_split((unsigned long long)entry->dma_handle, &address_high, + &address_low); + prev = list_entry(entry->list.prev, struct tsi148_dma_entry, + list); + prev->descriptor.dnlau = cpu_to_be32(address_high); + prev->descriptor.dnlal = cpu_to_be32(address_low); + + } + + return 0; + +err_dma: 
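/*
 * Hedged usage sketch, not part of this patch: how a consumer driver would
 * exercise the linked-list DMA path built by tsi148_dma_list_add() above,
 * going through the VME core wrappers.  The vme_dma_* names and signatures
 * are assumed from vme.h, error handling is trimmed, and the A32/SCT/D32
 * attributes are picked only as an example.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include "vme.h"		/* assumed: VME core API built alongside vme_user */

static int example_dma_vme_to_mem(struct vme_dev *vdev, dma_addr_t dest_bus,
				  unsigned long long vme_addr, size_t len)
{
	struct vme_resource *chan;
	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dst;
	int ret = -ENOMEM;

	chan = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
	if (!chan)
		return -EBUSY;

	list = vme_new_dma_list(chan);
	src = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);
	dst = vme_dma_pci_attribute(dest_bus);

	if (list && src && dst) {
		/* One descriptor per add; exec walks the whole chain */
		ret = vme_dma_list_add(list, src, dst, len);
		if (!ret)
			ret = vme_dma_list_exec(list);
	}

	if (src)
		vme_dma_free_attribute(src);
	if (dst)
		vme_dma_free_attribute(dst);
	if (list)
		vme_dma_list_free(list);
	vme_dma_free(chan);
	return ret;
}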
+err_dest: +err_source: +err_align: + kfree(entry); +err_mem: + return retval; +} + +/* + * Check to see if the provided DMA channel is busy. + */ +static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel) +{ + u32 tmp; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DSTA); + + if (tmp & TSI148_LCSR_DSTA_BSY) + return 0; + else + return 1; + +} + +/* + * Execute a previously generated link list + * + * XXX Need to provide control register configuration. + */ +static int tsi148_dma_list_exec(struct vme_dma_list *list) +{ + struct vme_dma_resource *ctrlr; + int channel, retval; + struct tsi148_dma_entry *entry; + u32 bus_addr_high, bus_addr_low; + u32 val, dctlreg = 0; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + ctrlr = list->parent; + + tsi148_bridge = ctrlr->parent; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&ctrlr->mtx); + + channel = ctrlr->number; + + if (!list_empty(&ctrlr->running)) { + /* + * XXX We have an active DMA transfer and currently haven't + * sorted out the mechanism for "pending" DMA transfers. + * Return busy. + */ + /* Need to add to pending here */ + mutex_unlock(&ctrlr->mtx); + return -EBUSY; + } else { + list_add(&list->list, &ctrlr->running); + } + + /* Get first bus address and write into registers */ + entry = list_first_entry(&list->entries, struct tsi148_dma_entry, + list); + + mutex_unlock(&ctrlr->mtx); + + reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low); + + iowrite32be(bus_addr_high, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU); + iowrite32be(bus_addr_low, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL); + + dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DCTL); + + /* Start the operation */ + iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); + + retval = wait_event_interruptible(bridge->dma_queue[channel], + tsi148_dma_busy(ctrlr->parent, channel)); + + if (retval) { + iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base + + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); + /* Wait for the operation to abort */ + wait_event(bridge->dma_queue[channel], + tsi148_dma_busy(ctrlr->parent, channel)); + retval = -EINTR; + goto exit; + } + + /* + * Read status register, this register is valid until we kick off a + * new transfer. + */ + val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + + TSI148_LCSR_OFFSET_DSTA); + + if (val & TSI148_LCSR_DSTA_VBE) { + dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val); + retval = -EIO; + } + +exit: + /* Remove list from running list */ + mutex_lock(&ctrlr->mtx); + list_del(&list->list); + mutex_unlock(&ctrlr->mtx); + + return retval; +} + +/* + * Clean up a previously generated link list + * + * We have a separate function, don't assume that the chain can't be reused. 
+ */ +static int tsi148_dma_list_empty(struct vme_dma_list *list) +{ + struct list_head *pos, *temp; + struct tsi148_dma_entry *entry; + + struct vme_bridge *tsi148_bridge = list->parent->parent; + + /* detach and free each entry */ + list_for_each_safe(pos, temp, &list->entries) { + list_del(pos); + entry = list_entry(pos, struct tsi148_dma_entry, list); + + dma_unmap_single(tsi148_bridge->parent, entry->dma_handle, + sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); + kfree(entry); + } + + return 0; +} + +/* + * All 4 location monitors reside at the same base - this is therefore a + * system wide configuration. + * + * This does not enable the LM monitor - that should be done when the first + * callback is attached and disabled when the last callback is removed. + */ +static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, + u32 aspace, u32 cycle) +{ + u32 lm_base_high, lm_base_low, lm_ctl = 0; + int i; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = lm->parent; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&lm->mtx); + + /* If we already have a callback attached, we can't move it! */ + for (i = 0; i < lm->monitors; i++) { + if (bridge->lm_callback[i]) { + mutex_unlock(&lm->mtx); + dev_err(tsi148_bridge->parent, "Location monitor " + "callback attached, can't reset\n"); + return -EBUSY; + } + } + + switch (aspace) { + case VME_A16: + lm_ctl |= TSI148_LCSR_LMAT_AS_A16; + break; + case VME_A24: + lm_ctl |= TSI148_LCSR_LMAT_AS_A24; + break; + case VME_A32: + lm_ctl |= TSI148_LCSR_LMAT_AS_A32; + break; + case VME_A64: + lm_ctl |= TSI148_LCSR_LMAT_AS_A64; + break; + default: + mutex_unlock(&lm->mtx); + dev_err(tsi148_bridge->parent, "Invalid address space\n"); + return -EINVAL; + } + + if (cycle & VME_SUPER) + lm_ctl |= TSI148_LCSR_LMAT_SUPR ; + if (cycle & VME_USER) + lm_ctl |= TSI148_LCSR_LMAT_NPRIV; + if (cycle & VME_PROG) + lm_ctl |= TSI148_LCSR_LMAT_PGM; + if (cycle & VME_DATA) + lm_ctl |= TSI148_LCSR_LMAT_DATA; + + reg_split(lm_base, &lm_base_high, &lm_base_low); + + iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU); + iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL); + iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT); + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* Get configuration of the callback monitor and return whether it is enabled + * or disabled. 
+ */ +static int tsi148_lm_get(struct vme_lm_resource *lm, + unsigned long long *lm_base, u32 *aspace, u32 *cycle) +{ + u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0; + struct tsi148_driver *bridge; + + bridge = lm->parent->driver_priv; + + mutex_lock(&lm->mtx); + + lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU); + lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL); + lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); + + reg_join(lm_base_high, lm_base_low, lm_base); + + if (lm_ctl & TSI148_LCSR_LMAT_EN) + enabled = 1; + + if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) + *aspace |= VME_A16; + + if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) + *aspace |= VME_A24; + + if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) + *aspace |= VME_A32; + + if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) + *aspace |= VME_A64; + + + if (lm_ctl & TSI148_LCSR_LMAT_SUPR) + *cycle |= VME_SUPER; + if (lm_ctl & TSI148_LCSR_LMAT_NPRIV) + *cycle |= VME_USER; + if (lm_ctl & TSI148_LCSR_LMAT_PGM) + *cycle |= VME_PROG; + if (lm_ctl & TSI148_LCSR_LMAT_DATA) + *cycle |= VME_DATA; + + mutex_unlock(&lm->mtx); + + return enabled; +} + +/* + * Attach a callback to a specific location monitor. + * + * Callback will be passed the monitor triggered. + */ +static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor, + void (*callback)(void *), void *data) +{ + u32 lm_ctl, tmp; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; + + tsi148_bridge = lm->parent; + + bridge = tsi148_bridge->driver_priv; + + mutex_lock(&lm->mtx); + + /* Ensure that the location monitor is configured - need PGM or DATA */ + lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); + if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) { + mutex_unlock(&lm->mtx); + dev_err(tsi148_bridge->parent, "Location monitor not properly " + "configured\n"); + return -EINVAL; + } + + /* Check that a callback isn't already attached */ + if (bridge->lm_callback[monitor]) { + mutex_unlock(&lm->mtx); + dev_err(tsi148_bridge->parent, "Existing callback attached\n"); + return -EBUSY; + } + + /* Attach callback */ + bridge->lm_callback[monitor] = callback; + bridge->lm_data[monitor] = data; + + /* Enable Location Monitor interrupt */ + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); + tmp |= TSI148_LCSR_INTEN_LMEN[monitor]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp |= TSI148_LCSR_INTEO_LMEO[monitor]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + /* Ensure that global Location Monitor Enable set */ + if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) { + lm_ctl |= TSI148_LCSR_LMAT_EN; + iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT); + } + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* + * Detach a callback function forn a specific location monitor. 
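/*
 * Hedged usage sketch, not part of this patch: attaching a callback to one
 * of the four location monitors handled by the lm_attach()/lm_detach()
 * operations around this point, via the VME core wrappers.  The vme_lm_*
 * signatures are assumed from vme.h; the callback and the A32 base address
 * are invented for the example.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include "vme.h"		/* assumed: VME core API built alongside vme_user */

static void example_lm_hit(void *data)
{
	/* Runs from the bridge's location-monitor interrupt path */
	pr_info("location monitor 0 triggered\n");
}

static int example_setup_lm(struct vme_dev *vdev)
{
	struct vme_resource *lm;
	int ret;

	lm = vme_lm_request(vdev);
	if (!lm)
		return -ENODEV;

	/* All four monitors share one base address, as noted above */
	ret = vme_lm_set(lm, 0x20000000, VME_A32, VME_USER | VME_DATA);
	if (!ret)
		ret = vme_lm_attach(lm, 0, example_lm_hit, NULL);
	if (ret)
		vme_lm_free(lm);
	return ret;
}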
+ */ +static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor) +{ + u32 lm_en, tmp; + struct tsi148_driver *bridge; + + bridge = lm->parent->driver_priv; + + mutex_lock(&lm->mtx); + + /* Disable Location Monitor and ensure previous interrupts are clear */ + lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN); + lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor]; + iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN); + + tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); + tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor]; + iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); + + iowrite32be(TSI148_LCSR_INTC_LMC[monitor], + bridge->base + TSI148_LCSR_INTC); + + /* Detach callback */ + bridge->lm_callback[monitor] = NULL; + bridge->lm_data[monitor] = NULL; + + /* If all location monitors disabled, disable global Location Monitor */ + if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S | + TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) { + tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT); + tmp &= ~TSI148_LCSR_LMAT_EN; + iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT); + } + + mutex_unlock(&lm->mtx); + + return 0; +} + +/* + * Determine Geographical Addressing + */ +static int tsi148_slot_get(struct vme_bridge *tsi148_bridge) +{ + u32 slot = 0; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + if (!geoid) { + slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT); + slot = slot & TSI148_LCSR_VSTAT_GA_M; + } else + slot = geoid; + + return (int)slot; +} + +static void *tsi148_alloc_consistent(struct device *parent, size_t size, + dma_addr_t *dma) +{ + struct pci_dev *pdev; + + /* Find pci_dev container of dev */ + pdev = to_pci_dev(parent); + + return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL); +} + +static void tsi148_free_consistent(struct device *parent, size_t size, + void *vaddr, dma_addr_t dma) +{ + struct pci_dev *pdev; + + /* Find pci_dev container of dev */ + pdev = to_pci_dev(parent); + + dma_free_coherent(&pdev->dev, size, vaddr, dma); +} + +/* + * Configure CR/CSR space + * + * Access to the CR/CSR can be configured at power-up. The location of the + * CR/CSR registers in the CR/CSR address space is determined by the boards + * Auto-ID or Geographic address. This function ensures that the window is + * enabled at an offset consistent with the boards geopgraphic address. + * + * Each board has a 512kB window, with the highest 4kB being used for the + * boards registers, this means there is a fix length 508kB window which must + * be mapped onto PCI memory. 
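/*
 * Illustrative sketch, not part of this patch: CR/CSR space gives each slot
 * a 512 kB region, so tsi148_crcsr_init() below places the board's image at
 * slot * 0x80000 and programs CBAR with the same slot number (the register
 * keeps it shifted left by 3).  A stand-alone version of that arithmetic:
 */
#include <stdint.h>

#define EXAMPLE_CRCSR_SLOT_SIZE 0x80000U	/* 512 kB of CR/CSR space per slot */

static uint32_t example_crcsr_offset(unsigned int slot)
{
	/* Slot 3, for instance, lands at 3 * 0x80000 = 0x180000 */
	return slot * EXAMPLE_CRCSR_SLOT_SIZE;
}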
+ */ +static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + u32 cbar, crat, vstat; + u32 crcsr_bus_high, crcsr_bus_low; + int retval; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* Allocate mem for CR/CSR image */ + bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev, + VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus, GFP_KERNEL); + if (!bridge->crcsr_kernel) { + dev_err(tsi148_bridge->parent, "Failed to allocate memory for " + "CR/CSR image\n"); + return -ENOMEM; + } + + reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); + + iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU); + iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL); + + /* Ensure that the CR/CSR is configured at the correct offset */ + cbar = ioread32be(bridge->base + TSI148_CBAR); + cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3; + + vstat = tsi148_slot_get(tsi148_bridge); + + if (cbar != vstat) { + cbar = vstat; + dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n"); + iowrite32be(cbar<<3, bridge->base + TSI148_CBAR); + } + dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar); + + crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); + if (crat & TSI148_LCSR_CRAT_EN) + dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n"); + else { + dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n"); + iowrite32be(crat | TSI148_LCSR_CRAT_EN, + bridge->base + TSI148_LCSR_CRAT); + } + + /* If we want flushed, error-checked writes, set up a window + * over the CR/CSR registers. We read from here to safely flush + * through VME writes. + */ + if (err_chk) { + retval = tsi148_master_set(bridge->flush_image, 1, + (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT, + VME_D16); + if (retval) + dev_err(tsi148_bridge->parent, "Configuring flush image" + " failed\n"); + } + + return 0; + +} + +static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, + struct pci_dev *pdev) +{ + u32 crat; + struct tsi148_driver *bridge; + + bridge = tsi148_bridge->driver_priv; + + /* Turn off CR/CSR space */ + crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); + iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, + bridge->base + TSI148_LCSR_CRAT); + + /* Free image */ + iowrite32be(0, bridge->base + TSI148_LCSR_CROU); + iowrite32be(0, bridge->base + TSI148_LCSR_CROL); + + dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE, + bridge->crcsr_kernel, bridge->crcsr_bus); +} + +static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int retval, i, master_num; + u32 data; + struct list_head *pos = NULL, *n; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *tsi148_device; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + struct vme_dma_resource *dma_ctrlr; + struct vme_lm_resource *lm; + + /* If we want to support more than one of each bridge, we need to + * dynamically generate this so we get one per device + */ + tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL); + if (!tsi148_bridge) { + retval = -ENOMEM; + goto err_struct; + } + vme_init_bridge(tsi148_bridge); + + tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL); + if (!tsi148_device) { + retval = -ENOMEM; + goto err_driver; + } + + tsi148_bridge->driver_priv = tsi148_device; + + /* Enable the device */ + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "Unable to enable device\n"); + goto err_enable; + } + + /* Map Registers */ + retval = pci_request_regions(pdev, driver_name); + if (retval) { + 
dev_err(&pdev->dev, "Unable to reserve resources\n"); + goto err_resource; + } + + /* map registers in BAR 0 */ + tsi148_device->base = ioremap(pci_resource_start(pdev, 0), + 4096); + if (!tsi148_device->base) { + dev_err(&pdev->dev, "Unable to remap CRG region\n"); + retval = -EIO; + goto err_remap; + } + + /* Check to see if the mapping worked out */ + data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF; + if (data != PCI_VENDOR_ID_TUNDRA) { + dev_err(&pdev->dev, "CRG region check failed\n"); + retval = -EIO; + goto err_test; + } + + /* Initialize wait queues & mutual exclusion flags */ + init_waitqueue_head(&tsi148_device->dma_queue[0]); + init_waitqueue_head(&tsi148_device->dma_queue[1]); + init_waitqueue_head(&tsi148_device->iack_queue); + mutex_init(&tsi148_device->vme_int); + mutex_init(&tsi148_device->vme_rmw); + + tsi148_bridge->parent = &pdev->dev; + strcpy(tsi148_bridge->name, driver_name); + + /* Setup IRQ */ + retval = tsi148_irq_init(tsi148_bridge); + if (retval != 0) { + dev_err(&pdev->dev, "Chip Initialization failed.\n"); + goto err_irq; + } + + /* If we are going to flush writes, we need to read from the VME bus. + * We need to do this safely, thus we read the devices own CR/CSR + * register. To do this we must set up a window in CR/CSR space and + * hence have one less master window resource available. + */ + master_num = TSI148_MAX_MASTER; + if (err_chk) { + master_num--; + + tsi148_device->flush_image = + kmalloc(sizeof(*tsi148_device->flush_image), + GFP_KERNEL); + if (!tsi148_device->flush_image) { + retval = -ENOMEM; + goto err_master; + } + tsi148_device->flush_image->parent = tsi148_bridge; + spin_lock_init(&tsi148_device->flush_image->lock); + tsi148_device->flush_image->locked = 1; + tsi148_device->flush_image->number = master_num; + memset(&tsi148_device->flush_image->bus_resource, 0, + sizeof(tsi148_device->flush_image->bus_resource)); + tsi148_device->flush_image->kern_base = NULL; + } + + /* Add master windows to list */ + for (i = 0; i < master_num; i++) { + master_image = kmalloc(sizeof(*master_image), GFP_KERNEL); + if (!master_image) { + retval = -ENOMEM; + goto err_master; + } + master_image->parent = tsi148_bridge; + spin_lock_init(&master_image->lock); + master_image->locked = 0; + master_image->number = i; + master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | + VME_USER3 | VME_USER4; + master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + master_image->width_attr = VME_D16 | VME_D32; + memset(&master_image->bus_resource, 0, + sizeof(master_image->bus_resource)); + master_image->kern_base = NULL; + list_add_tail(&master_image->list, + &tsi148_bridge->master_resources); + } + + /* Add slave windows to list */ + for (i = 0; i < TSI148_MAX_SLAVE; i++) { + slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL); + if (!slave_image) { + retval = -ENOMEM; + goto err_slave; + } + slave_image->parent = tsi148_bridge; + mutex_init(&slave_image->mtx); + slave_image->locked = 0; + slave_image->number = i; + slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | + VME_A64; + slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | + VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | + VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | + VME_PROG | VME_DATA; + list_add_tail(&slave_image->list, + &tsi148_bridge->slave_resources); + } + + /* Add dma 
engines to list */ + for (i = 0; i < TSI148_MAX_DMA; i++) { + dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL); + if (!dma_ctrlr) { + retval = -ENOMEM; + goto err_dma; + } + dma_ctrlr->parent = tsi148_bridge; + mutex_init(&dma_ctrlr->mtx); + dma_ctrlr->locked = 0; + dma_ctrlr->number = i; + dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM | + VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME | + VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME | + VME_DMA_PATTERN_TO_MEM; + INIT_LIST_HEAD(&dma_ctrlr->pending); + INIT_LIST_HEAD(&dma_ctrlr->running); + list_add_tail(&dma_ctrlr->list, + &tsi148_bridge->dma_resources); + } + + /* Add location monitor to list */ + lm = kmalloc(sizeof(*lm), GFP_KERNEL); + if (!lm) { + retval = -ENOMEM; + goto err_lm; + } + lm->parent = tsi148_bridge; + mutex_init(&lm->mtx); + lm->locked = 0; + lm->number = 1; + lm->monitors = 4; + list_add_tail(&lm->list, &tsi148_bridge->lm_resources); + + tsi148_bridge->slave_get = tsi148_slave_get; + tsi148_bridge->slave_set = tsi148_slave_set; + tsi148_bridge->master_get = tsi148_master_get; + tsi148_bridge->master_set = tsi148_master_set; + tsi148_bridge->master_read = tsi148_master_read; + tsi148_bridge->master_write = tsi148_master_write; + tsi148_bridge->master_rmw = tsi148_master_rmw; + tsi148_bridge->dma_list_add = tsi148_dma_list_add; + tsi148_bridge->dma_list_exec = tsi148_dma_list_exec; + tsi148_bridge->dma_list_empty = tsi148_dma_list_empty; + tsi148_bridge->irq_set = tsi148_irq_set; + tsi148_bridge->irq_generate = tsi148_irq_generate; + tsi148_bridge->lm_set = tsi148_lm_set; + tsi148_bridge->lm_get = tsi148_lm_get; + tsi148_bridge->lm_attach = tsi148_lm_attach; + tsi148_bridge->lm_detach = tsi148_lm_detach; + tsi148_bridge->slot_get = tsi148_slot_get; + tsi148_bridge->alloc_consistent = tsi148_alloc_consistent; + tsi148_bridge->free_consistent = tsi148_free_consistent; + + data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); + dev_info(&pdev->dev, "Board is%s the VME system controller\n", + (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); + if (!geoid) + dev_info(&pdev->dev, "VME geographical address is %d\n", + data & TSI148_LCSR_VSTAT_GA_M); + else + dev_info(&pdev->dev, "VME geographical address is set to %d\n", + geoid); + + dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", + err_chk ? 
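/*
 * Hedged usage sketch, not part of this patch: a consumer driver bound to
 * this bridge reaches the master_set/master_read operations wired up above
 * through the VME core.  The vme_master_* signatures are assumed from
 * vme.h; the window placement and width are invented for the example.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include "vme.h"		/* assumed: VME core API built alongside vme_user */

static int example_read_board_reg(struct vme_dev *vdev, u32 *val)
{
	struct vme_resource *win;
	ssize_t n;

	win = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
	if (!win)
		return -ENODEV;

	/* 1 MB window onto A32 space at 0x20000000, D32 single cycles */
	if (vme_master_set(win, 1, 0x20000000, 0x100000, VME_A32,
			   VME_SCT | VME_USER | VME_DATA, VME_D32)) {
		vme_master_free(win);
		return -EIO;
	}

	n = vme_master_read(win, val, sizeof(*val), 0x0);
	vme_master_free(win);
	return n == sizeof(*val) ? 0 : -EIO;
}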
"enabled" : "disabled"); + + retval = tsi148_crcsr_init(tsi148_bridge, pdev); + if (retval) { + dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); + goto err_crcsr; + } + + retval = vme_register_bridge(tsi148_bridge); + if (retval != 0) { + dev_err(&pdev->dev, "Chip Registration failed.\n"); + goto err_reg; + } + + pci_set_drvdata(pdev, tsi148_bridge); + + /* Clear VME bus "board fail", and "power-up reset" lines */ + data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); + data &= ~TSI148_LCSR_VSTAT_BRDFL; + data |= TSI148_LCSR_VSTAT_CPURST; + iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT); + + return 0; + +err_reg: + tsi148_crcsr_exit(tsi148_bridge, pdev); +err_crcsr: +err_lm: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) { + lm = list_entry(pos, struct vme_lm_resource, list); + list_del(pos); + kfree(lm); + } +err_dma: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) { + dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); + list_del(pos); + kfree(dma_ctrlr); + } +err_slave: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } +err_master: + /* resources are stored in link list */ + list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + tsi148_irq_exit(tsi148_bridge, pdev); +err_irq: +err_test: + iounmap(tsi148_device->base); +err_remap: + pci_release_regions(pdev); +err_resource: + pci_disable_device(pdev); +err_enable: + kfree(tsi148_device); +err_driver: + kfree(tsi148_bridge); +err_struct: + return retval; + +} + +static void tsi148_remove(struct pci_dev *pdev) +{ + struct list_head *pos = NULL; + struct list_head *tmplist; + struct vme_master_resource *master_image; + struct vme_slave_resource *slave_image; + struct vme_dma_resource *dma_ctrlr; + int i; + struct tsi148_driver *bridge; + struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev); + + bridge = tsi148_bridge->driver_priv; + + + dev_dbg(&pdev->dev, "Driver is being unloaded.\n"); + + /* + * Shutdown all inbound and outbound windows. + */ + for (i = 0; i < 8; i++) { + iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] + + TSI148_LCSR_OFFSET_ITAT); + iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] + + TSI148_LCSR_OFFSET_OTAT); + } + + /* + * Shutdown Location monitor. + */ + iowrite32be(0, bridge->base + TSI148_LCSR_LMAT); + + /* + * Shutdown CRG map. + */ + iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT); + + /* + * Clear error status. 
+ */ + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT); + iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT); + iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT); + + /* + * Remove VIRQ interrupt (if any) + */ + if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800) + iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR); + + /* + * Map all Interrupts to PCI INTA + */ + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1); + iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2); + + tsi148_irq_exit(tsi148_bridge, pdev); + + vme_unregister_bridge(tsi148_bridge); + + tsi148_crcsr_exit(tsi148_bridge, pdev); + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) { + dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); + list_del(pos); + kfree(dma_ctrlr); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) { + slave_image = list_entry(pos, struct vme_slave_resource, list); + list_del(pos); + kfree(slave_image); + } + + /* resources are stored in link list */ + list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) { + master_image = list_entry(pos, struct vme_master_resource, + list); + list_del(pos); + kfree(master_image); + } + + iounmap(bridge->base); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + kfree(tsi148_bridge->driver_priv); + + kfree(tsi148_bridge); +} + +module_pci_driver(tsi148_driver); + +MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes"); +module_param(err_chk, bool, 0); + +MODULE_PARM_DESC(geoid, "Override geographical addressing"); +module_param(geoid, int, 0); + +MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h new file mode 100644 index 000000000000..226fedc6f167 --- /dev/null +++ b/drivers/staging/vme_user/vme_tsi148.h @@ -0,0 +1,1407 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * tsi148.h + * + * Support for the Tundra TSI148 VME Bridge chip + * + * Author: Tom Armistead + * Updated and maintained by Ajit Prem + * Copyright 2004 Motorola Inc. + */ + +#ifndef TSI148_H +#define TSI148_H + +#ifndef PCI_VENDOR_ID_TUNDRA +#define PCI_VENDOR_ID_TUNDRA 0x10e3 +#endif + +#ifndef PCI_DEVICE_ID_TUNDRA_TSI148 +#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148 +#endif + +/* + * Define the number of each that the Tsi148 supports. + */ +#define TSI148_MAX_MASTER 8 /* Max Master Windows */ +#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */ +#define TSI148_MAX_DMA 2 /* Max DMA Controllers */ +#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */ +#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */ + +/* Structure used to hold driver specific information */ +struct tsi148_driver { + void __iomem *base; /* Base Address of device registers */ + wait_queue_head_t dma_queue[2]; + wait_queue_head_t iack_queue; + void (*lm_callback[4])(void *); /* Called in interrupt handler */ + void *lm_data[4]; + void *crcsr_kernel; + dma_addr_t crcsr_bus; + struct vme_master_resource *flush_image; + struct mutex vme_rmw; /* Only one RMW cycle at a time */ + struct mutex vme_int; /* + * Only one VME interrupt can be + * generated at a time, provide locking + */ +}; + +/* + * Layout of a DMAC Linked-List Descriptor + * + * Note: This structure is accessed via the chip and therefore must be + * correctly laid out - It must also be aligned on 64-bit boundaries. 
+ */ +struct tsi148_dma_descriptor { + __be32 dsau; /* Source Address */ + __be32 dsal; + __be32 ddau; /* Destination Address */ + __be32 ddal; + __be32 dsat; /* Source attributes */ + __be32 ddat; /* Destination attributes */ + __be32 dnlau; /* Next link address */ + __be32 dnlal; + __be32 dcnt; /* Byte count */ + __be32 ddbs; /* 2eSST Broadcast select */ +}; + +struct tsi148_dma_entry { + /* + * The descriptor needs to be aligned on a 64-bit boundary, we increase + * the chance of this by putting it first in the structure. + */ + struct tsi148_dma_descriptor descriptor; + struct list_head list; + dma_addr_t dma_handle; +}; + +/* + * TSI148 ASIC register structure overlays and bit field definitions. + * + * Note: Tsi148 Register Group (CRG) consists of the following + * combination of registers: + * PCFS - PCI Configuration Space Registers + * LCSR - Local Control and Status Registers + * GCSR - Global Control and Status Registers + * CR/CSR - Subset of Configuration ROM / + * Control and Status Registers + */ + + +/* + * Command/Status Registers (CRG + $004) + */ +#define TSI148_PCFS_ID 0x0 +#define TSI148_PCFS_CSR 0x4 +#define TSI148_PCFS_CLASS 0x8 +#define TSI148_PCFS_MISC0 0xC +#define TSI148_PCFS_MBARL 0x10 +#define TSI148_PCFS_MBARU 0x14 + +#define TSI148_PCFS_SUBID 0x28 + +#define TSI148_PCFS_CAPP 0x34 + +#define TSI148_PCFS_MISC1 0x3C + +#define TSI148_PCFS_XCAPP 0x40 +#define TSI148_PCFS_XSTAT 0x44 + +/* + * LCSR definitions + */ + +/* + * Outbound Translations + */ +#define TSI148_LCSR_OT0_OTSAU 0x100 +#define TSI148_LCSR_OT0_OTSAL 0x104 +#define TSI148_LCSR_OT0_OTEAU 0x108 +#define TSI148_LCSR_OT0_OTEAL 0x10C +#define TSI148_LCSR_OT0_OTOFU 0x110 +#define TSI148_LCSR_OT0_OTOFL 0x114 +#define TSI148_LCSR_OT0_OTBS 0x118 +#define TSI148_LCSR_OT0_OTAT 0x11C + +#define TSI148_LCSR_OT1_OTSAU 0x120 +#define TSI148_LCSR_OT1_OTSAL 0x124 +#define TSI148_LCSR_OT1_OTEAU 0x128 +#define TSI148_LCSR_OT1_OTEAL 0x12C +#define TSI148_LCSR_OT1_OTOFU 0x130 +#define TSI148_LCSR_OT1_OTOFL 0x134 +#define TSI148_LCSR_OT1_OTBS 0x138 +#define TSI148_LCSR_OT1_OTAT 0x13C + +#define TSI148_LCSR_OT2_OTSAU 0x140 +#define TSI148_LCSR_OT2_OTSAL 0x144 +#define TSI148_LCSR_OT2_OTEAU 0x148 +#define TSI148_LCSR_OT2_OTEAL 0x14C +#define TSI148_LCSR_OT2_OTOFU 0x150 +#define TSI148_LCSR_OT2_OTOFL 0x154 +#define TSI148_LCSR_OT2_OTBS 0x158 +#define TSI148_LCSR_OT2_OTAT 0x15C + +#define TSI148_LCSR_OT3_OTSAU 0x160 +#define TSI148_LCSR_OT3_OTSAL 0x164 +#define TSI148_LCSR_OT3_OTEAU 0x168 +#define TSI148_LCSR_OT3_OTEAL 0x16C +#define TSI148_LCSR_OT3_OTOFU 0x170 +#define TSI148_LCSR_OT3_OTOFL 0x174 +#define TSI148_LCSR_OT3_OTBS 0x178 +#define TSI148_LCSR_OT3_OTAT 0x17C + +#define TSI148_LCSR_OT4_OTSAU 0x180 +#define TSI148_LCSR_OT4_OTSAL 0x184 +#define TSI148_LCSR_OT4_OTEAU 0x188 +#define TSI148_LCSR_OT4_OTEAL 0x18C +#define TSI148_LCSR_OT4_OTOFU 0x190 +#define TSI148_LCSR_OT4_OTOFL 0x194 +#define TSI148_LCSR_OT4_OTBS 0x198 +#define TSI148_LCSR_OT4_OTAT 0x19C + +#define TSI148_LCSR_OT5_OTSAU 0x1A0 +#define TSI148_LCSR_OT5_OTSAL 0x1A4 +#define TSI148_LCSR_OT5_OTEAU 0x1A8 +#define TSI148_LCSR_OT5_OTEAL 0x1AC +#define TSI148_LCSR_OT5_OTOFU 0x1B0 +#define TSI148_LCSR_OT5_OTOFL 0x1B4 +#define TSI148_LCSR_OT5_OTBS 0x1B8 +#define TSI148_LCSR_OT5_OTAT 0x1BC + +#define TSI148_LCSR_OT6_OTSAU 0x1C0 +#define TSI148_LCSR_OT6_OTSAL 0x1C4 +#define TSI148_LCSR_OT6_OTEAU 0x1C8 +#define TSI148_LCSR_OT6_OTEAL 0x1CC +#define TSI148_LCSR_OT6_OTOFU 0x1D0 +#define TSI148_LCSR_OT6_OTOFL 0x1D4 +#define TSI148_LCSR_OT6_OTBS 0x1D8 +#define 
TSI148_LCSR_OT6_OTAT 0x1DC + +#define TSI148_LCSR_OT7_OTSAU 0x1E0 +#define TSI148_LCSR_OT7_OTSAL 0x1E4 +#define TSI148_LCSR_OT7_OTEAU 0x1E8 +#define TSI148_LCSR_OT7_OTEAL 0x1EC +#define TSI148_LCSR_OT7_OTOFU 0x1F0 +#define TSI148_LCSR_OT7_OTOFL 0x1F4 +#define TSI148_LCSR_OT7_OTBS 0x1F8 +#define TSI148_LCSR_OT7_OTAT 0x1FC + +#define TSI148_LCSR_OT0 0x100 +#define TSI148_LCSR_OT1 0x120 +#define TSI148_LCSR_OT2 0x140 +#define TSI148_LCSR_OT3 0x160 +#define TSI148_LCSR_OT4 0x180 +#define TSI148_LCSR_OT5 0x1A0 +#define TSI148_LCSR_OT6 0x1C0 +#define TSI148_LCSR_OT7 0x1E0 + +static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1, + TSI148_LCSR_OT2, TSI148_LCSR_OT3, + TSI148_LCSR_OT4, TSI148_LCSR_OT5, + TSI148_LCSR_OT6, TSI148_LCSR_OT7 }; + +#define TSI148_LCSR_OFFSET_OTSAU 0x0 +#define TSI148_LCSR_OFFSET_OTSAL 0x4 +#define TSI148_LCSR_OFFSET_OTEAU 0x8 +#define TSI148_LCSR_OFFSET_OTEAL 0xC +#define TSI148_LCSR_OFFSET_OTOFU 0x10 +#define TSI148_LCSR_OFFSET_OTOFL 0x14 +#define TSI148_LCSR_OFFSET_OTBS 0x18 +#define TSI148_LCSR_OFFSET_OTAT 0x1C + +/* + * VMEbus interrupt ack + * offset 200 + */ +#define TSI148_LCSR_VIACK1 0x204 +#define TSI148_LCSR_VIACK2 0x208 +#define TSI148_LCSR_VIACK3 0x20C +#define TSI148_LCSR_VIACK4 0x210 +#define TSI148_LCSR_VIACK5 0x214 +#define TSI148_LCSR_VIACK6 0x218 +#define TSI148_LCSR_VIACK7 0x21C + +static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1, + TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3, + TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5, + TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 }; + +/* + * RMW + * offset 220 + */ +#define TSI148_LCSR_RMWAU 0x220 +#define TSI148_LCSR_RMWAL 0x224 +#define TSI148_LCSR_RMWEN 0x228 +#define TSI148_LCSR_RMWC 0x22C +#define TSI148_LCSR_RMWS 0x230 + +/* + * VMEbus control + * offset 234 + */ +#define TSI148_LCSR_VMCTRL 0x234 +#define TSI148_LCSR_VCTRL 0x238 +#define TSI148_LCSR_VSTAT 0x23C + +/* + * PCI status + * offset 240 + */ +#define TSI148_LCSR_PSTAT 0x240 + +/* + * VME filter. + * offset 250 + */ +#define TSI148_LCSR_VMEFL 0x250 + + /* + * VME exception. 
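/*
 * Illustrative note, not part of this patch: the per-window register blocks
 * are addressed throughout the driver as base + offset, so for example the
 * attribute register of outbound window 3 is
 *   TSI148_LCSR_OT[3] + TSI148_LCSR_OFFSET_OTAT = 0x160 + 0x1C = 0x17C,
 * which matches the flat TSI148_LCSR_OT3_OTAT definition above.
 */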
+ * offset 260 + */ +#define TSI148_LCSR_VEAU 0x260 +#define TSI148_LCSR_VEAL 0x264 +#define TSI148_LCSR_VEAT 0x268 + + /* + * PCI error + * offset 270 + */ +#define TSI148_LCSR_EDPAU 0x270 +#define TSI148_LCSR_EDPAL 0x274 +#define TSI148_LCSR_EDPXA 0x278 +#define TSI148_LCSR_EDPXS 0x27C +#define TSI148_LCSR_EDPAT 0x280 + + /* + * Inbound Translations + * offset 300 + */ +#define TSI148_LCSR_IT0_ITSAU 0x300 +#define TSI148_LCSR_IT0_ITSAL 0x304 +#define TSI148_LCSR_IT0_ITEAU 0x308 +#define TSI148_LCSR_IT0_ITEAL 0x30C +#define TSI148_LCSR_IT0_ITOFU 0x310 +#define TSI148_LCSR_IT0_ITOFL 0x314 +#define TSI148_LCSR_IT0_ITAT 0x318 + +#define TSI148_LCSR_IT1_ITSAU 0x320 +#define TSI148_LCSR_IT1_ITSAL 0x324 +#define TSI148_LCSR_IT1_ITEAU 0x328 +#define TSI148_LCSR_IT1_ITEAL 0x32C +#define TSI148_LCSR_IT1_ITOFU 0x330 +#define TSI148_LCSR_IT1_ITOFL 0x334 +#define TSI148_LCSR_IT1_ITAT 0x338 + +#define TSI148_LCSR_IT2_ITSAU 0x340 +#define TSI148_LCSR_IT2_ITSAL 0x344 +#define TSI148_LCSR_IT2_ITEAU 0x348 +#define TSI148_LCSR_IT2_ITEAL 0x34C +#define TSI148_LCSR_IT2_ITOFU 0x350 +#define TSI148_LCSR_IT2_ITOFL 0x354 +#define TSI148_LCSR_IT2_ITAT 0x358 + +#define TSI148_LCSR_IT3_ITSAU 0x360 +#define TSI148_LCSR_IT3_ITSAL 0x364 +#define TSI148_LCSR_IT3_ITEAU 0x368 +#define TSI148_LCSR_IT3_ITEAL 0x36C +#define TSI148_LCSR_IT3_ITOFU 0x370 +#define TSI148_LCSR_IT3_ITOFL 0x374 +#define TSI148_LCSR_IT3_ITAT 0x378 + +#define TSI148_LCSR_IT4_ITSAU 0x380 +#define TSI148_LCSR_IT4_ITSAL 0x384 +#define TSI148_LCSR_IT4_ITEAU 0x388 +#define TSI148_LCSR_IT4_ITEAL 0x38C +#define TSI148_LCSR_IT4_ITOFU 0x390 +#define TSI148_LCSR_IT4_ITOFL 0x394 +#define TSI148_LCSR_IT4_ITAT 0x398 + +#define TSI148_LCSR_IT5_ITSAU 0x3A0 +#define TSI148_LCSR_IT5_ITSAL 0x3A4 +#define TSI148_LCSR_IT5_ITEAU 0x3A8 +#define TSI148_LCSR_IT5_ITEAL 0x3AC +#define TSI148_LCSR_IT5_ITOFU 0x3B0 +#define TSI148_LCSR_IT5_ITOFL 0x3B4 +#define TSI148_LCSR_IT5_ITAT 0x3B8 + +#define TSI148_LCSR_IT6_ITSAU 0x3C0 +#define TSI148_LCSR_IT6_ITSAL 0x3C4 +#define TSI148_LCSR_IT6_ITEAU 0x3C8 +#define TSI148_LCSR_IT6_ITEAL 0x3CC +#define TSI148_LCSR_IT6_ITOFU 0x3D0 +#define TSI148_LCSR_IT6_ITOFL 0x3D4 +#define TSI148_LCSR_IT6_ITAT 0x3D8 + +#define TSI148_LCSR_IT7_ITSAU 0x3E0 +#define TSI148_LCSR_IT7_ITSAL 0x3E4 +#define TSI148_LCSR_IT7_ITEAU 0x3E8 +#define TSI148_LCSR_IT7_ITEAL 0x3EC +#define TSI148_LCSR_IT7_ITOFU 0x3F0 +#define TSI148_LCSR_IT7_ITOFL 0x3F4 +#define TSI148_LCSR_IT7_ITAT 0x3F8 + + +#define TSI148_LCSR_IT0 0x300 +#define TSI148_LCSR_IT1 0x320 +#define TSI148_LCSR_IT2 0x340 +#define TSI148_LCSR_IT3 0x360 +#define TSI148_LCSR_IT4 0x380 +#define TSI148_LCSR_IT5 0x3A0 +#define TSI148_LCSR_IT6 0x3C0 +#define TSI148_LCSR_IT7 0x3E0 + +static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1, + TSI148_LCSR_IT2, TSI148_LCSR_IT3, + TSI148_LCSR_IT4, TSI148_LCSR_IT5, + TSI148_LCSR_IT6, TSI148_LCSR_IT7 }; + +#define TSI148_LCSR_OFFSET_ITSAU 0x0 +#define TSI148_LCSR_OFFSET_ITSAL 0x4 +#define TSI148_LCSR_OFFSET_ITEAU 0x8 +#define TSI148_LCSR_OFFSET_ITEAL 0xC +#define TSI148_LCSR_OFFSET_ITOFU 0x10 +#define TSI148_LCSR_OFFSET_ITOFL 0x14 +#define TSI148_LCSR_OFFSET_ITAT 0x18 + + /* + * Inbound Translation GCSR + * offset 400 + */ +#define TSI148_LCSR_GBAU 0x400 +#define TSI148_LCSR_GBAL 0x404 +#define TSI148_LCSR_GCSRAT 0x408 + + /* + * Inbound Translation CRG + * offset 40C + */ +#define TSI148_LCSR_CBAU 0x40C +#define TSI148_LCSR_CBAL 0x410 +#define TSI148_LCSR_CSRAT 0x414 + + /* + * Inbound Translation CR/CSR + * CRG + * offset 418 + */ +#define 
TSI148_LCSR_CROU 0x418 +#define TSI148_LCSR_CROL 0x41C +#define TSI148_LCSR_CRAT 0x420 + + /* + * Inbound Translation Location Monitor + * offset 424 + */ +#define TSI148_LCSR_LMBAU 0x424 +#define TSI148_LCSR_LMBAL 0x428 +#define TSI148_LCSR_LMAT 0x42C + + /* + * VMEbus Interrupt Control. + * offset 430 + */ +#define TSI148_LCSR_BCU 0x430 +#define TSI148_LCSR_BCL 0x434 +#define TSI148_LCSR_BPGTR 0x438 +#define TSI148_LCSR_BPCTR 0x43C +#define TSI148_LCSR_VICR 0x440 + + /* + * Local Bus Interrupt Control. + * offset 448 + */ +#define TSI148_LCSR_INTEN 0x448 +#define TSI148_LCSR_INTEO 0x44C +#define TSI148_LCSR_INTS 0x450 +#define TSI148_LCSR_INTC 0x454 +#define TSI148_LCSR_INTM1 0x458 +#define TSI148_LCSR_INTM2 0x45C + + /* + * DMA Controllers + * offset 500 + */ +#define TSI148_LCSR_DCTL0 0x500 +#define TSI148_LCSR_DSTA0 0x504 +#define TSI148_LCSR_DCSAU0 0x508 +#define TSI148_LCSR_DCSAL0 0x50C +#define TSI148_LCSR_DCDAU0 0x510 +#define TSI148_LCSR_DCDAL0 0x514 +#define TSI148_LCSR_DCLAU0 0x518 +#define TSI148_LCSR_DCLAL0 0x51C +#define TSI148_LCSR_DSAU0 0x520 +#define TSI148_LCSR_DSAL0 0x524 +#define TSI148_LCSR_DDAU0 0x528 +#define TSI148_LCSR_DDAL0 0x52C +#define TSI148_LCSR_DSAT0 0x530 +#define TSI148_LCSR_DDAT0 0x534 +#define TSI148_LCSR_DNLAU0 0x538 +#define TSI148_LCSR_DNLAL0 0x53C +#define TSI148_LCSR_DCNT0 0x540 +#define TSI148_LCSR_DDBS0 0x544 + +#define TSI148_LCSR_DCTL1 0x580 +#define TSI148_LCSR_DSTA1 0x584 +#define TSI148_LCSR_DCSAU1 0x588 +#define TSI148_LCSR_DCSAL1 0x58C +#define TSI148_LCSR_DCDAU1 0x590 +#define TSI148_LCSR_DCDAL1 0x594 +#define TSI148_LCSR_DCLAU1 0x598 +#define TSI148_LCSR_DCLAL1 0x59C +#define TSI148_LCSR_DSAU1 0x5A0 +#define TSI148_LCSR_DSAL1 0x5A4 +#define TSI148_LCSR_DDAU1 0x5A8 +#define TSI148_LCSR_DDAL1 0x5AC +#define TSI148_LCSR_DSAT1 0x5B0 +#define TSI148_LCSR_DDAT1 0x5B4 +#define TSI148_LCSR_DNLAU1 0x5B8 +#define TSI148_LCSR_DNLAL1 0x5BC +#define TSI148_LCSR_DCNT1 0x5C0 +#define TSI148_LCSR_DDBS1 0x5C4 + +#define TSI148_LCSR_DMA0 0x500 +#define TSI148_LCSR_DMA1 0x580 + + +static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0, + TSI148_LCSR_DMA1 }; + +#define TSI148_LCSR_OFFSET_DCTL 0x0 +#define TSI148_LCSR_OFFSET_DSTA 0x4 +#define TSI148_LCSR_OFFSET_DCSAU 0x8 +#define TSI148_LCSR_OFFSET_DCSAL 0xC +#define TSI148_LCSR_OFFSET_DCDAU 0x10 +#define TSI148_LCSR_OFFSET_DCDAL 0x14 +#define TSI148_LCSR_OFFSET_DCLAU 0x18 +#define TSI148_LCSR_OFFSET_DCLAL 0x1C +#define TSI148_LCSR_OFFSET_DSAU 0x20 +#define TSI148_LCSR_OFFSET_DSAL 0x24 +#define TSI148_LCSR_OFFSET_DDAU 0x28 +#define TSI148_LCSR_OFFSET_DDAL 0x2C +#define TSI148_LCSR_OFFSET_DSAT 0x30 +#define TSI148_LCSR_OFFSET_DDAT 0x34 +#define TSI148_LCSR_OFFSET_DNLAU 0x38 +#define TSI148_LCSR_OFFSET_DNLAL 0x3C +#define TSI148_LCSR_OFFSET_DCNT 0x40 +#define TSI148_LCSR_OFFSET_DDBS 0x44 + + /* + * GCSR Register Group + */ + + /* + * GCSR CRG + * offset 00 600 - DEVI/VENI + * offset 04 604 - CTRL/GA/REVID + * offset 08 608 - Semaphore3/2/1/0 + * offset 0C 60C - Seamphore7/6/5/4 + */ +#define TSI148_GCSR_ID 0x600 +#define TSI148_GCSR_CSR 0x604 +#define TSI148_GCSR_SEMA0 0x608 +#define TSI148_GCSR_SEMA1 0x60C + + /* + * Mail Box + * GCSR CRG + * offset 10 610 - Mailbox0 + */ +#define TSI148_GCSR_MBOX0 0x610 +#define TSI148_GCSR_MBOX1 0x614 +#define TSI148_GCSR_MBOX2 0x618 +#define TSI148_GCSR_MBOX3 0x61C + +static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0, + TSI148_GCSR_MBOX1, + TSI148_GCSR_MBOX2, + TSI148_GCSR_MBOX3 }; + + /* + * CR/CSR + */ + + /* + * CR/CSR CRG + * offset 7FFF4 FF4 - 
CSRBCR + * offset 7FFF8 FF8 - CSRBSR + * offset 7FFFC FFC - CBAR + */ +#define TSI148_CSRBCR 0xFF4 +#define TSI148_CSRBSR 0xFF8 +#define TSI148_CBAR 0xFFC + + + + + /* + * TSI148 Register Bit Definitions + */ + + /* + * PFCS Register Set + */ +#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin ssys err */ +#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */ +#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */ +#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */ +#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */ + +#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */ +#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */ +#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */ +#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */ +#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */ +#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DELSEL Timing */ +#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */ +#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */ +#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */ +#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */ + +/* + * Revision ID/Class Code Registers (CRG +$008) + */ +#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */ +#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */ +#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Sub-Class ID */ +#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */ + +/* + * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C) + */ +#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Master Lat Timer */ +#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */ +#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */ + +/* + * Memory Base Address Lower Reg (CRG + $010) + */ +#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */ +#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */ +#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */ +#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */ + +/* + * Message Signaled Interrupt Capabilities Register (CRG + $040) + */ +#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */ +#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */ +#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */ +#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */ + +/* + * Message Address Lower Register (CRG +$044) + */ +#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */ + +/* + * Message Data Register (CRG + 4C) + */ +#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */ + +/* + * PCI-X Capabilities Register (CRG + $050) + */ +#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */ +#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */ +#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */ +#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */ + +/* + * PCI-X Status Register (CRG +$054) + */ +#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */ +#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */ +#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans + */ +#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */ +#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */ +#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */ +#define 
TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */ +#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */ +#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */ +#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */ +#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */ +#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */ + +/* + * LCSR Registers + */ + +/* + * Outbound Translation Starting Address Lower + */ +#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */ + +/* + * Outbound Translation Ending Address Lower + */ +#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */ + +/* + * Outbound Translation Offset Lower + */ +#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */ + +/* + * Outbound Translation 2eSST Broadcast Select + */ +#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */ + +/* + * Outbound Translation Attribute + */ +#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */ +#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */ + +#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */ +#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */ +#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */ +#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */ +#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */ + +#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */ +#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */ +#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */ +#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */ + +#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */ +#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */ +#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */ +#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */ +#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */ +#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */ +#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */ + +#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */ +#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */ +#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */ + +#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */ +#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */ + +#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */ +#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_A24 (1<<0) /* A24 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A32 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */ +#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */ +#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */ + +/* + * VME Master Control Register CRG+$234 + */ +#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */ +#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */ +#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */ +#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */ + +#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */ + +#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask 
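/*
 * Worked example, illustrative only: an enabled outbound window that
 * __tsi148_master_get() reports as A32/SCT/D32 supervisory data access
 * carries an OTAT value of
 *   TSI148_LCSR_OTAT_EN | TSI148_LCSR_OTAT_TM_SCT |
 *   TSI148_LCSR_OTAT_DBW_32 | TSI148_LCSR_OTAT_SUP |
 *   TSI148_LCSR_OTAT_AMODE_A32
 *   = 0x80000000 | 0x0 | 0x40 | 0x20 | 0x2 = 0x80000062
 * (prefetch fields left at their zero defaults for simplicity).
 */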
+ */ +#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */ +#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */ +#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */ +#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */ +#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */ +#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */ +#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */ +#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */ + +#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */ +#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */ +#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */ +#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */ +#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */ +#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */ +#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */ +#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */ +#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */ + +#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */ +#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 8us */ +#define TSI148_LCSR_VMCTRL_VTON_8 (1<<8) /* 8us */ +#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */ +#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */ +#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */ +#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */ +#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */ +#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */ + +#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask + */ +#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */ +#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */ +#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */ +#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */ + +#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */ +#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask + */ + +/* + * VMEbus Control Register CRG+$238 + */ +#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */ + +#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */ +#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */ +#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */ +#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */ + +#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy + */ + +#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */ +#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */ + +#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */ +#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */ + +#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */ +#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */ + +#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask + */ +#define 
TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */ +#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */ +#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */ +#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */ +#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */ +#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */ +#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */ +#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */ + +/* + * VMEbus Status Register CRG + $23C + */ +#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */ +#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */ +#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */ +#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */ +#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */ +#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */ +#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */ +#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */ +#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */ + +/* + * PCI Configuration Status Register CRG+$240 + */ +#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */ +#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66Mhz enable */ +#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */ +#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */ +#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVL status */ +#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */ +#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */ + +/* + * VMEbus Exception Attributes Register CRG + $268 + */ +#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */ +#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */ +#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */ +#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */ +#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */ +#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */ +#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */ +#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */ +#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */ +#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */ +#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */ +#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */ +#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */ + + +/* + * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280 + */ +#define TSI148_LCSR_EDPAT_EDPCL (1<<29) + +/* + * Inbound Translation Starting Address Lower + */ +#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */ +#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */ +#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */ + +/* + * Inbound Translation Ending Address Lower + */ +#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */ +#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */ +#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */ + +/* + * Inbound Translation Offset Lower + */ +#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */ +#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */ +#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */ + +/* + * Inbound Translation Attribute + */ +#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */ +#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */ + +#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */ +#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */ +#define 
TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */ +#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */ +#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */ + +#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */ +#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */ +#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */ +#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */ + +#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */ +#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */ +#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */ +#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */ +#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */ + +#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */ +#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */ +#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */ +#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */ +#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */ + +#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */ +#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */ +#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */ +#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */ + +/* + * GCSR Base Address Lower Address CRG +$404 + */ +#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */ + +/* + * GCSR Attribute Register CRG + $408 + */ +#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */ + +#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */ +#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */ +#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */ +#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */ +#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */ + +#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Sup set -GCSR decoder */ +#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privliged set - CGSR */ +#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */ +#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* DATA set GCSR decoder */ + +/* + * CRG Base Address Lower Address CRG + $410 + */ +#define TSI148_LCSR_CBAL_M (0xFFFFF<<12) + +/* + * CRG Attribute Register CRG + $414 + */ +#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable PRG Access */ + +#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */ +#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */ +#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */ +#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */ +#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */ + +#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */ +#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privliged(User) Access */ +#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */ +#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */ + +/* + * CR/CSR Offset Lower Register CRG + $41C + */ +#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */ + +/* + * CR/CSR Attribute register CRG + $420 + */ +#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */ + +/* + * Location Monitor base address lower register CRG + $428 + */ +#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */ + +/* + * Location Monitor Attribute Register CRG + $42C + */ +#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */ + +#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address 
Space MASK */ +#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */ +#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */ +#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */ +#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */ + +#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */ +#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */ +#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */ +#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */ + +/* + * Broadcast Pulse Generator Timer Register CRG + $438 + */ +#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */ + +/* + * Broadcast Programmable Clock Timer Register CRG + $43C + */ +#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */ + +/* + * VMEbus Interrupt Control Register CRG + $43C + */ +#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */ +#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */ +#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */ +#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */ + +#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */ +#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */ +#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */ +#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */ + +#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */ +#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */ +#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */ +#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */ +#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */ + +#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */ +#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */ +#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */ +#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */ +#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */ + +#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */ + +#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */ +#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */ + +#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */ +#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */ +#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */ +#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */ +#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */ +#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */ +#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */ +#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */ + +static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1, + TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3, + TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5, + TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 }; + +#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */ + +/* + * Interrupt Enable Register CRG + $440 + */ +#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */ +#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */ +#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */ +#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */ +#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */ +#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */ +#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */ +#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */ +#define 
TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */ +#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */ +#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */ +#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */ +#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */ +#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */ +#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */ +#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */ +#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */ +#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */ +#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */ +#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */ +#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */ +#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */ +#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */ + +static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN, + TSI148_LCSR_INTEN_LM1EN, + TSI148_LCSR_INTEN_LM2EN, + TSI148_LCSR_INTEN_LM3EN }; + +static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN, + TSI148_LCSR_INTEN_IRQ2EN, + TSI148_LCSR_INTEN_IRQ3EN, + TSI148_LCSR_INTEN_IRQ4EN, + TSI148_LCSR_INTEN_IRQ5EN, + TSI148_LCSR_INTEN_IRQ6EN, + TSI148_LCSR_INTEN_IRQ7EN }; + +/* + * Interrupt Enable Out Register CRG + $444 + */ +#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */ +#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */ +#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */ +#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */ +#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */ +#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */ +#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */ +#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */ +#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */ +#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */ +#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */ +#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */ +#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */ +#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */ +#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */ +#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */ +#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */ +#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */ +#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */ +#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */ +#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */ +#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */ +#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */ + +static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO, + TSI148_LCSR_INTEO_LM1EO, + TSI148_LCSR_INTEO_LM2EO, + TSI148_LCSR_INTEO_LM3EO }; + +static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO, + TSI148_LCSR_INTEO_IRQ2EO, + TSI148_LCSR_INTEO_IRQ3EO, + TSI148_LCSR_INTEO_IRQ4EO, + TSI148_LCSR_INTEO_IRQ5EO, + TSI148_LCSR_INTEO_IRQ6EO, + TSI148_LCSR_INTEO_IRQ7EO }; + +/* + * Interrupt Status Register CRG + $448 + */ +#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */ +#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */ +#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */ +#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */ +#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */ +#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */ +#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */ +#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */ +#define TSI148_LCSR_INTS_MB1S 
(1<<17) /* Mail Box 1 */ +#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */ +#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */ +#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */ +#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */ +#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */ +#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */ +#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */ +#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */ +#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */ +#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */ +#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */ +#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */ +#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */ +#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */ + +static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S, + TSI148_LCSR_INTS_LM1S, + TSI148_LCSR_INTS_LM2S, + TSI148_LCSR_INTS_LM3S }; + +static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S, + TSI148_LCSR_INTS_MB1S, + TSI148_LCSR_INTS_MB2S, + TSI148_LCSR_INTS_MB3S }; + +/* + * Interrupt Clear Register CRG + $44C + */ +#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */ +#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */ +#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */ +#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */ +#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */ +#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */ +#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */ +#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */ +#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */ +#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */ +#define TSI148_LCSR_INTC_PERRC (1<<13) /* VMEbus Error */ +#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Access Time-out */ +#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */ +#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */ +#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */ +#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */ + +static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C, + TSI148_LCSR_INTC_LM1C, + TSI148_LCSR_INTC_LM2C, + TSI148_LCSR_INTC_LM3C }; + +static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C, + TSI148_LCSR_INTC_MB1C, + TSI148_LCSR_INTC_MB2C, + TSI148_LCSR_INTC_MB3C }; + +/* + * Interrupt Map Register 1 CRG + $458 + */ +#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */ +#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */ +#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */ +#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */ +#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */ +#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */ +#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */ +#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */ +#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */ +#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */ + +/* + * Interrupt Map Register 2 CRG + $45C + */ +#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */ +#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */ +#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */ +#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */ +#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */ +#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */ +#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */ +#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */ +#define 
TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */ +#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */ +#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */ +#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */ +#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */ + +/* + * DMA Control (0-1) Registers CRG + $500 + */ +#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */ +#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */ +#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */ + +#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */ + +#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */ +#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */ +#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */ +#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */ +#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */ +#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */ +#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */ +#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */ +#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */ + +#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */ +#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */ +#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */ +#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */ +#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */ +#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */ +#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */ +#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */ +#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */ + +#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */ +#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */ +#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */ +#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */ +#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */ +#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */ +#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */ +#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */ +#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */ + +#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */ +#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */ +#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */ +#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */ +#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 3us */ +#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 4us */ +#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 8us */ +#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 16us */ +#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 32us */ + +/* + * DMA Status Registers (0-1) CRG + $504 + */ +#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */ +#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */ +#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */ +#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */ +#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */ +#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */ +#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */ +#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */ + +/* + * DMA Current Link Address Lower (0-1) + */ 
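/*
 * [Editor's note -- illustrative sketch only, not part of the patch.]
 * The *_M macros in this header are full-field masks and the remaining
 * values are pre-shifted field encodings, so a field is normally updated
 * with a mask-then-or read-modify-write. The helper below is a
 * hypothetical example built from the DCTL block-size definitions above;
 * neither the function name nor its use appears in this patch.
 */
static inline u32 tsi148_dctl_set_256_byte_blocks(u32 dctl)
{
	/* clear the VMEbus and PCI block-size fields */
	dctl &= ~(TSI148_LCSR_DCTL_VBKS_M | TSI148_LCSR_DCTL_PBKS_M);
	/* select 256-byte bursts on both buses */
	dctl |= TSI148_LCSR_DCTL_VBKS_256 | TSI148_LCSR_DCTL_PBKS_256;
	return dctl;
}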
+#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */ + +/* + * DMA Source Attribute (0-1) Reg + */ +#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */ +#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */ +#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */ +#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */ + +#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */ +#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */ + +#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */ +#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */ +#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */ +#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */ + +#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */ +#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */ +#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */ +#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */ +#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */ +#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */ +#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */ + +#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */ +#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */ +#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */ + +#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */ +#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */ + +#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */ +#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */ +#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */ +#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */ +#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */ +#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */ +#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */ +#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */ +#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */ +#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */ + +/* + * DMA Destination Attribute Registers (0-1) + */ +#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */ +#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */ + +#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */ +#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */ +#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */ +#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */ + +#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */ +#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */ +#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */ +#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */ +#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */ +#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */ +#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */ + +#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */ +#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */ +#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */ + +#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */ +#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */ + +#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */ +#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */ +#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */ +#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */ +#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */ +#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CRC/SR */ +#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */ +#define TSI148_LCSR_DDAT_AMODE_USER2 
(9<<0) /* User2 */ +#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */ +#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */ + +/* + * DMA Next Link Address Lower + */ +#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */ +#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */ + +/* + * DMA 2eSST Broadcast Select + */ +#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */ + +/* + * GCSR Register Group + */ + +/* + * GCSR Control and Status Register CRG + $604 + */ +#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */ +#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */ +#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */ +#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Copntroller */ +#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */ + +#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */ +#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */ +#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */ +#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */ +#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */ +#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */ +#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */ +#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */ + +#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */ +#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */ + +/* + * CR/CSR Register Group + */ + +/* + * CR/CSR Bit Clear Register CRG + $FF4 + */ +#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */ +#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */ +#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */ +#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */ +#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */ + +/* + * CR/CSR Bit Set Register CRG+$FF8 + */ +#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Clear */ +#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Clear */ +#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status */ +#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Clear */ +#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Clear */ + +/* + * CR/CSR Base Address Register CRG + FFC + */ +#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */ + +#endif /* TSI148_H */ diff --git a/drivers/staging/vme_user/vme_user.c b/drivers/staging/vme_user/vme_user.c index 859af797630c..4e533c0bfe6d 100644 --- a/drivers/staging/vme_user/vme_user.c +++ b/drivers/staging/vme_user/vme_user.c @@ -33,8 +33,8 @@ #include <linux/io.h> #include <linux/uaccess.h> -#include <linux/vme.h> +#include "vme.h" #include "vme_user.h" static const char driver_name[] = "vme_user"; diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c index 577a38fae369..5de841cb776c 100644 --- a/drivers/staging/vt6655/baseband.c +++ b/drivers/staging/vt6655/baseband.c @@ -1912,7 +1912,7 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr, iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR); /* turn on REGR */ - MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR); + vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { by_value = ioread8(iobase + MAC_REG_BBREGCTL); @@ -1957,7 +1957,7 @@ bool bb_write_embedded(struct vnt_private 
*priv, unsigned char by_bb_addr, iowrite8(by_data, iobase + MAC_REG_BBREGDATA); /* turn on BBREGCTL_REGW */ - MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW); + vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { by_value = ioread8(iobase + MAC_REG_BBREGCTL); @@ -2013,8 +2013,8 @@ bool bb_vt3253_init(struct vnt_private *priv) byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]); - VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23); - MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0)); + iowrite32(0x23, iobase + MAC_REG_ITRTMSET); + vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0)); } priv->abyBBVGA[0] = 0x18; priv->abyBBVGA[1] = 0x0A; @@ -2054,7 +2054,7 @@ bool bb_vt3253_init(struct vnt_private *priv) byVT3253B0_AGC[ii][1]); iowrite8(0x23, iobase + MAC_REG_ITRTMSET); - MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0)); + vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0)); priv->abyBBVGA[0] = 0x14; priv->abyBBVGA[1] = 0x0A; diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index 2cde0082fc03..846469cc06bb 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -293,12 +293,10 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate, qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, local_tsf); /* adjust TSF, HW's TSF add TSF Offset reg */ - VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST, - (u32)qwTSFOffset); - VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST + 4, - (u32)(qwTSFOffset >> 32)); - MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, - TFTCTL_TSFSYNCEN); + qwTSFOffset = le64_to_cpu(qwTSFOffset); + iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST); + iowrite32((u32)(qwTSFOffset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN); } return true; } @@ -326,13 +324,13 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv, qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); /* set HW beacon interval */ - VNSvOutPortW(priv->port_offset + MAC_REG_BI, wBeaconInterval); + iowrite16(wBeaconInterval, priv->port_offset + MAC_REG_BI); priv->wBeaconInterval = wBeaconInterval; /* Set NextTBTT */ - VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); - VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT + 4, - (u32)(qwNextTBTT >> 32)); - MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); + qwNextTBTT = le64_to_cpu(qwNextTBTT); + iowrite32((u32)qwNextTBTT, priv->port_offset + MAC_REG_NEXTTBTT); + iowrite32((u32)(qwNextTBTT >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); return true; } @@ -354,29 +352,28 @@ void CARDbRadioPowerOff(struct vnt_private *priv) switch (priv->byRFType) { case RF_RFMD2959: - MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL, - SOFTPWRCTL_TXPEINV); - MACvWordRegBitsOn(priv->port_offset, MAC_REG_SOFTPWRCTL, - SOFTPWRCTL_SWPE1); + vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL, + SOFTPWRCTL_TXPEINV); + vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_SOFTPWRCTL, + SOFTPWRCTL_SWPE1); break; case RF_AIROHA: case RF_AL2230S: - MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL, - SOFTPWRCTL_SWPE2); - MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL, - SOFTPWRCTL_SWPE3); + vt6655_mac_word_reg_bits_off(priv->port_offset, 
MAC_REG_SOFTPWRCTL, + SOFTPWRCTL_SWPE2); + vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL, + SOFTPWRCTL_SWPE3); break; } - MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON); bb_set_deep_sleep(priv, priv->local_id); priv->radio_off = true; pr_debug("chester power off\n"); - MACvRegBitsOn(priv->port_offset, MAC_REG_GPIOCTL0, - LED_ACTSET); /* LED issue */ + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */ } void CARDvSafeResetTx(struct vnt_private *priv) @@ -411,8 +408,7 @@ void CARDvSafeResetTx(struct vnt_private *priv) MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma); /* set MAC Beacon TX pointer */ - MACvSetCurrBCNTxDescAddr(priv->port_offset, - (priv->tx_beacon_dma)); + iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR); } /* @@ -453,8 +449,8 @@ void CARDvSafeResetRx(struct vnt_private *priv) } /* set perPkt mode */ - MACvRx0PerPktMode(priv->port_offset); - MACvRx1PerPktMode(priv->port_offset); + iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL0); + iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL1); /* set MAC RD pointer */ MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma); @@ -553,7 +549,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type) /* swap over to get correct write order */ swap(phy.swap[0], phy.swap[1]); - VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_1, phy.field_write); + iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_1); /* RSPINF_b_2 */ vnt_get_phy_field(priv, 14, @@ -562,7 +558,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type) swap(phy.swap[0], phy.swap[1]); - VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_2, phy.field_write); + iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_2); /* RSPINF_b_5 */ vnt_get_phy_field(priv, 14, @@ -571,7 +567,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type) swap(phy.swap[0], phy.swap[1]); - VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_5, phy.field_write); + iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_5); /* RSPINF_b_11 */ vnt_get_phy_field(priv, 14, @@ -580,75 +576,66 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type) swap(phy.swap[0], phy.swap[1]); - VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_11, phy.field_write); + iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_11); /* RSPINF_a_6 */ s_vCalculateOFDMRParameter(RATE_6M, bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_6, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6); /* RSPINF_a_9 */ s_vCalculateOFDMRParameter(RATE_9M, bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_9, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9); /* RSPINF_a_12 */ s_vCalculateOFDMRParameter(RATE_12M, bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_12, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12); /* RSPINF_a_18 */ s_vCalculateOFDMRParameter(RATE_18M, bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_18, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18); /* RSPINF_a_24 */ 
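/*
 * [Editor's note -- not part of the patch.] The RSPINF conversions above
 * and below follow one mechanical rule: the old VNSvOutPortW(address,
 * value) and VNSvOutPortD(address, value) wrappers took the register
 * address first, whereas the standard iowrite16()/iowrite32() accessors
 * take the value first, so every replacement swaps the argument order
 * while keeping the MAKEWORD() payload and MAC_REG_RSPINF_* offset
 * unchanged, e.g. the RSPINF_a_18 write just above:
 *
 *	VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_18,
 *		     MAKEWORD(byTxRate, byRsvTime));
 * becomes
 *	iowrite16(MAKEWORD(byTxRate, byRsvTime),
 *		  priv->port_offset + MAC_REG_RSPINF_A_18);
 */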
s_vCalculateOFDMRParameter(RATE_24M, bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_24, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24); /* RSPINF_a_36 */ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_36M), bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_36, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36); /* RSPINF_a_48 */ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_48M), bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_48, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48); /* RSPINF_a_54 */ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M), bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_54, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54); /* RSPINF_a_72 */ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M), bb_type, &byTxRate, &byRsvTime); - VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_72, - MAKEWORD(byTxRate, byRsvTime)); + iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72); /* Set to Page0 */ MACvSelectPage0(priv->port_offset); @@ -734,9 +721,9 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2) * In: * priv - The adapter to be read * Out: - * qwCurrTSF - Current TSF counter + * none * - * Return Value: true if success; otherwise false + * Return Value: Current TSF counter */ u64 vt6655_get_current_tsf(struct vnt_private *priv) { @@ -745,7 +732,7 @@ u64 vt6655_get_current_tsf(struct vnt_private *priv) unsigned char data; u32 low, high; - MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD); + vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { data = ioread8(iobase + MAC_REG_TFTCTL); if (!(data & TFTCTL_TSFCNTRRD)) @@ -808,9 +795,10 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); /* Set NextTBTT */ - VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); - VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32)); - MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); + qwNextTBTT = le64_to_cpu(qwNextTBTT); + iowrite32((u32)qwNextTBTT, iobase + MAC_REG_NEXTTBTT); + iowrite32((u32)(qwNextTBTT >> 32), iobase + MAC_REG_NEXTTBTT + 4); + vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); } /* @@ -834,8 +822,9 @@ void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); /* Set NextTBTT */ - VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF); - VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32)); - MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); + qwTSF = le64_to_cpu(qwTSF); + iowrite32((u32)qwTSF, iobase + MAC_REG_NEXTTBTT); + iowrite32((u32)(qwTSF >> 32), iobase + MAC_REG_NEXTTBTT + 4); + vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF); } diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c index 652dcaf61169..e926f9829a15 100644 --- a/drivers/staging/vt6655/channel.c +++ 
b/drivers/staging/vt6655/channel.c @@ -94,7 +94,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch) } /* clear NAV */ - MACvRegBitsOn(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV); /* TX_PE will reserve 3 us for MAX2829 A mode only, * it is for better TX throughput diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index afaf331fe125..bab08a40fe66 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -122,6 +122,9 @@ static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent); static void device_free_info(struct vnt_private *priv); static void device_print_info(struct vnt_private *priv); +static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr); +static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr); + static int device_init_rd0_ring(struct vnt_private *priv); static int device_init_rd1_ring(struct vnt_private *priv); static int device_init_td0_ring(struct vnt_private *priv); @@ -186,6 +189,22 @@ device_set_options(struct vnt_private *priv) pr_debug(" byBBType= %d\n", (int)priv->byBBType); } +static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr) +{ + iowrite8(1, iobase + MAC_REG_PAGE1SEL); + for (int i = 0; i < 6; i++) + iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i); + iowrite8(0, iobase + MAC_REG_PAGE1SEL); +} + +static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr) +{ + iowrite8(1, iobase + MAC_REG_PAGE1SEL); + for (int i = 0; i < 6; i++) + mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i); + iowrite8(0, iobase + MAC_REG_PAGE1SEL); +} + /* * Initialisation of MAC & BBP registers */ @@ -340,8 +359,8 @@ static void device_init_registers(struct vnt_private *priv) } /* use relative tx timeout and 802.11i D4 */ - MACvWordRegBitsOn(priv->port_offset, - MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT)); + vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG, + (CFG_TKIPOPT | CFG_NOTXTIMEOUT)); /* set performance parameter by registry */ MACvSetShortRetryLimit(priv, priv->byShortRetryLimit); @@ -398,7 +417,7 @@ static void device_init_registers(struct vnt_private *priv) CARDvSafeResetTx(priv); if (priv->local_id <= REV_ID_VT3253_A1) - MACvRegBitsOn(priv->port_offset, MAC_REG_RCR, RCR_WPAERR); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR); /* Turn On Rx DMA */ MACvReceive0(priv->port_offset); @@ -979,7 +998,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv) if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; - if (!(priv->vif->bss_conf.assoc && priv->current_rssi)) + if (!(priv->vif->cfg.assoc && priv->current_rssi)) return; RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm); @@ -1055,13 +1074,12 @@ static void vnt_interrupt_process(struct vnt_private *priv) * update ISR counter */ while (isr && priv->vif) { - MACvWriteISR(priv->port_offset, isr); + iowrite32(isr, priv->port_offset + MAC_REG_ISR); if (isr & ISR_FETALERR) { pr_debug(" ISR_FETALERR\n"); iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL); - VNSvOutPortW(priv->port_offset + - MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI); + iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL); device_error(priv, isr); } @@ -1135,7 +1153,7 @@ static void vnt_interrupt_work(struct work_struct *work) if (priv->vif) vnt_interrupt_process(priv); - MACvIntEnable(priv->port_offset, IMR_MASK_VALUE); + 
iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR); } static irqreturn_t vnt_interrupt(int irq, void *arg) @@ -1144,7 +1162,7 @@ static irqreturn_t vnt_interrupt(int irq, void *arg) schedule_work(&priv->interrupt_work); - MACvIntDisable(priv->port_offset); + iowrite32(0, priv->port_offset + MAC_REG_IMR); return IRQ_HANDLED; } @@ -1253,8 +1271,8 @@ static int vnt_start(struct ieee80211_hw *hw) device_init_registers(priv); - dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n"); - MACvIntEnable(priv->port_offset, IMR_MASK_VALUE); + dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n"); + iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR); ieee80211_wake_queues(hw); @@ -1304,15 +1322,15 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_ADHOC: - MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST); - MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC); break; case NL80211_IFTYPE_AP: - MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST); - MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP); break; default: @@ -1333,16 +1351,16 @@ static void vnt_remove_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_ADHOC: - MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX); - MACvRegBitsOff(priv->port_offset, - MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX); + vt6655_mac_reg_bits_off(priv->port_offset, + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC); break; case NL80211_IFTYPE_AP: - MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX); - MACvRegBitsOff(priv->port_offset, - MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX); + vt6655_mac_reg_bits_off(priv->port_offset, + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); + vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP); break; default: break; @@ -1395,18 +1413,18 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed) static void vnt_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *conf, u32 changed) + struct ieee80211_bss_conf *conf, u64 changed) { struct vnt_private *priv = hw->priv; - priv->current_aid = conf->aid; + priv->current_aid = vif->cfg.aid; if (changed & BSS_CHANGED_BSSID && conf->bssid) { unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - MACvWriteBSSIDAddress(priv->port_offset, conf->bssid); + vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1458,17 +1476,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, if (conf->enable_beacon) { vnt_beacon_enable(priv, vif, conf); - MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, - TCR_AUTOBCNTX); + vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX); } else { - MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, - TCR_AUTOBCNTX); + 
vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, + TCR_AUTOBCNTX); } } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) && priv->op_mode != NL80211_IFTYPE_AP) { - if (conf->assoc && conf->beacon_rate) { + if (vif->cfg.assoc && conf->beacon_rate) { CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, conf->sync_tsf); @@ -1523,20 +1540,17 @@ static void vnt_configure(struct ieee80211_hw *hw, if (priv->mc_list_count > 2) { MACvSelectPage1(priv->port_offset); - VNSvOutPortD(priv->port_offset + - MAC_REG_MAR0, 0xffffffff); - VNSvOutPortD(priv->port_offset + - MAC_REG_MAR0 + 4, 0xffffffff); + iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0); + iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4); MACvSelectPage0(priv->port_offset); } else { MACvSelectPage1(priv->port_offset); - VNSvOutPortD(priv->port_offset + - MAC_REG_MAR0, (u32)multicast); - VNSvOutPortD(priv->port_offset + - MAC_REG_MAR0 + 4, - (u32)(multicast >> 32)); + multicast = le64_to_cpu(multicast); + iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0); + iowrite32((u32)(multicast >> 32), + priv->port_offset + MAC_REG_MAR0 + 4); MACvSelectPage0(priv->port_offset); } @@ -1726,7 +1740,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) } /* initial to reload eeprom */ MACvInitialize(priv); - MACvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr); + vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr); /* Get RFType */ priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE); diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c index 88ddd0676463..dcc649532737 100644 --- a/drivers/staging/vt6655/mac.c +++ b/drivers/staging/vt6655/mac.c @@ -38,6 +38,47 @@ #include "mac.h" +void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask) +{ + unsigned char reg_value; + + reg_value = ioread8(iobase + reg_offset); + iowrite8(reg_value | bit_mask, iobase + reg_offset); +} + +void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask) +{ + unsigned short reg_value; + + reg_value = ioread16(iobase + reg_offset); + iowrite16(reg_value | (bit_mask), iobase + reg_offset); +} + +void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask) +{ + unsigned char reg_value; + + reg_value = ioread8(iobase + reg_offset); + iowrite8(reg_value & ~(bit_mask), iobase + reg_offset); +} + +void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask) +{ + unsigned short reg_value; + + reg_value = ioread16(iobase + reg_offset); + iowrite16(reg_value & ~(bit_mask), iobase + reg_offset); +} + +static void vt6655_mac_clear_stck_ds(void __iomem *iobase) +{ + u8 reg_value; + + reg_value = ioread8(iobase + MAC_REG_STICKHW); + reg_value = reg_value & 0xFC; + iowrite8(reg_value, iobase + MAC_REG_STICKHW); +} + /* * Description: * Test if all test bits off @@ -337,7 +378,7 @@ bool MACbSafeRxOff(struct vnt_private *priv) } /* try to safe shutdown RX */ - MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_RXON); + vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_RXON); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST)) @@ -392,7 +433,7 @@ bool MACbSafeTxOff(struct vnt_private *priv) } /* try to safe shutdown TX */ - MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_TXON); + vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_TXON); /* 
W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { @@ -423,7 +464,7 @@ bool MACbSafeStop(struct vnt_private *priv) { void __iomem *io_base = priv->port_offset; - MACvRegBitsOff(io_base, MAC_REG_TCR, TCR_AUTOBCNTX); + vt6655_mac_reg_bits_off(io_base, MAC_REG_TCR, TCR_AUTOBCNTX); if (!MACbSafeRxOff(priv)) { pr_debug(" MACbSafeRxOff == false)\n"); @@ -436,7 +477,7 @@ bool MACbSafeStop(struct vnt_private *priv) return false; } - MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN); + vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN); return true; } @@ -458,7 +499,7 @@ bool MACbShutdown(struct vnt_private *priv) { void __iomem *io_base = priv->port_offset; /* disable MAC IMR */ - MACvIntDisable(io_base); + iowrite32(0, io_base + MAC_REG_IMR); MACvSetLoopbackMode(priv, MAC_LB_INTERNAL); /* stop the adapter */ if (!MACbSafeStop(priv)) { @@ -486,7 +527,7 @@ void MACvInitialize(struct vnt_private *priv) { void __iomem *io_base = priv->port_offset; /* clear sticky bits */ - MACvClearStckDS(io_base); + vt6655_mac_clear_stck_ds(io_base); /* disable force PME-enable */ iowrite8(PME_OVR, io_base + MAC_REG_PMC1); /* only 3253 A */ @@ -730,7 +771,7 @@ bool MACbPSWakeup(struct vnt_private *priv) return true; /* Disable PS */ - MACvRegBitsOff(io_base, MAC_REG_PSCTL, PSCTL_PSEN); + vt6655_mac_reg_bits_off(io_base, MAC_REG_PSCTL, PSCTL_PSEN); /* Check if SyncFlushOK */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h index 57ae3bdbdb2d..0122c4603c66 100644 --- a/drivers/staging/vt6655/mac.h +++ b/drivers/staging/vt6655/mac.h @@ -18,7 +18,7 @@ #ifndef __MAC_H__ #define __MAC_H__ -#include "upc.h" +#include "device.h" /*--------------------- Export Definitions -------------------------*/ /* Registers in the MAC */ @@ -537,82 +537,14 @@ /*--------------------- Export Macros ------------------------------*/ -#define MACvRegBitsOn(iobase, byRegOfs, byBits) \ -do { \ - unsigned char byData; \ - byData = ioread8(iobase + byRegOfs); \ - iowrite8(byData | (byBits), iobase + byRegOfs); \ -} while (0) - -#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \ -do { \ - unsigned short wData; \ - wData = ioread16(iobase + byRegOfs); \ - VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \ -} while (0) - -#define MACvRegBitsOff(iobase, byRegOfs, byBits) \ -do { \ - unsigned char byData; \ - byData = ioread8(iobase + byRegOfs); \ - iowrite8(byData & ~(byBits), iobase + byRegOfs); \ -} while (0) - -#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \ -do { \ - unsigned short wData; \ - wData = ioread16(iobase + byRegOfs); \ - VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \ -} while (0) - -/* set the chip with current BCN tx descriptor address */ -#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \ - VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \ - dwCurrDescAddr) - -/* set the chip with current BCN length */ -#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \ - VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2, \ - wCurrBCNLength) - -#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \ -do { \ - iowrite8(1, iobase + MAC_REG_PAGE1SEL); \ - iowrite8(pbyEtherAddr[0], iobase + MAC_REG_BSSID0); \ - iowrite8(pbyEtherAddr[1], iobase + MAC_REG_BSSID0 + 1); \ - iowrite8(pbyEtherAddr[2], iobase + MAC_REG_BSSID0 + 2); \ - iowrite8(pbyEtherAddr[3], iobase + MAC_REG_BSSID0 + 3); \ - iowrite8(pbyEtherAddr[4], iobase + MAC_REG_BSSID0 + 4); \ - iowrite8(pbyEtherAddr[5], iobase + MAC_REG_BSSID0 + 5); \ - iowrite8(0, iobase + 
MAC_REG_PAGE1SEL); \
-} while (0)
-
-#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
-do { \
-	iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
-	pbyEtherAddr[0] = ioread8(iobase + MAC_REG_PAR0); \
-	pbyEtherAddr[1] = ioread8(iobase + MAC_REG_PAR0 + 1); \
-	pbyEtherAddr[2] = ioread8(iobase + MAC_REG_PAR0 + 2); \
-	pbyEtherAddr[3] = ioread8(iobase + MAC_REG_PAR0 + 3); \
-	pbyEtherAddr[4] = ioread8(iobase + MAC_REG_PAR0 + 4); \
-	pbyEtherAddr[5] = ioread8(iobase + MAC_REG_PAR0 + 5); \
-	iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
-} while (0)
-
-#define MACvRx0PerPktMode(iobase) \
-	VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-
-#define MACvRx1PerPktMode(iobase) \
-	VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-
 #define MACvReceive0(iobase) \
 do { \
 	unsigned long dwData; \
 	dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
 	if (dwData & DMACTL_RUN) \
-		VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
+		iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL0); \
 	else \
-		VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
+		iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL0); \
 } while (0)
 
 #define MACvReceive1(iobase) \
@@ -620,9 +552,9 @@ do { \
 	unsigned long dwData; \
 	dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
 	if (dwData & DMACTL_RUN) \
-		VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
+		iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL1); \
 	else \
-		VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
+		iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL1); \
 } while (0)
 
 #define MACvTransmit0(iobase) \
@@ -630,9 +562,9 @@ do { \
 	unsigned long dwData; \
 	dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
 	if (dwData & DMACTL_RUN) \
-		VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
+		iowrite32(DMACTL_WAKE, iobase + MAC_REG_TXDMACTL0); \
 	else \
-		VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
+		iowrite32(DMACTL_RUN, iobase + MAC_REG_TXDMACTL0); \
 } while (0)
 
 #define MACvTransmitAC0(iobase) \
@@ -640,28 +572,11 @@ do { \
 	unsigned long dwData; \
 	dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
 	if (dwData & DMACTL_RUN) \
-		VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
+		iowrite32(DMACTL_WAKE, iobase + MAC_REG_AC0DMACTL); \
 	else \
-		VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvClearStckDS(iobase) \
-do { \
-	unsigned char byOrgValue; \
-	byOrgValue = ioread8(iobase + MAC_REG_STICKHW); \
-	byOrgValue = byOrgValue & 0xFC; \
-	iowrite8(byOrgValue, iobase + MAC_REG_STICKHW); \
+		iowrite32(DMACTL_RUN, iobase + MAC_REG_AC0DMACTL); \
 } while (0)
 
-#define MACvWriteISR(iobase, dwValue) \
-	VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
-
-#define MACvIntEnable(iobase, dwMask) \
-	VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
-
-#define MACvIntDisable(iobase) \
-	VNSvOutPortD(iobase + MAC_REG_IMR, 0)
-
 #define MACvSelectPage0(iobase) \
 	iowrite8(0, iobase + MAC_REG_PAGE1SEL)
@@ -673,7 +588,7 @@ do { \
 	unsigned long dwOrgValue; \
 	dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
 	dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
-	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+	iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
 } while (0)
 
 #define MACvDisableProtectMD(iobase) \
@@ -681,7 +596,7 @@ do { \
 	unsigned long dwOrgValue; \
 	dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
 	dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
-	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+	iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
 } while (0)
 
 #define MACvEnableBarkerPreambleMd(iobase) \
@@ -689,7 +604,7 @@ do { \
 	unsigned long dwOrgValue; \
 	dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
 	dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
-	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+	iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
 } while (0)
 
 #define MACvDisableBarkerPreambleMd(iobase) \
@@ -697,7 +612,7 @@ do { \
 	unsigned long dwOrgValue; \
 	dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
 	dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
-	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+	iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
 } while (0)
 
 #define MACvSetBBType(iobase, byTyp) \
@@ -706,15 +621,20 @@ do { \
 	dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
 	dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
 	dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
-	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+	iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
 } while (0)
 
 #define MACvSetRFLE_LatchBase(iobase) \
-	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
 
 #define MAKEWORD(lb, hb) \
 	((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
 
+void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
+void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
+void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
+void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
+
 bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
 		      unsigned char byTestBits);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 06066fa56dd5..8527ad3eff48 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -52,30 +52,30 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
 	u16 wAID = priv->current_aid | BIT(14) | BIT(15);
 
 	/* set period of power up before TBTT */
-	VNSvOutPortW(priv->port_offset + MAC_REG_PWBT, C_PWBT);
+	iowrite16(C_PWBT, priv->port_offset + MAC_REG_PWBT);
 
 	if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
 		/* set AID */
-		VNSvOutPortW(priv->port_offset + MAC_REG_AIDATIM, wAID);
+		iowrite16(wAID, priv->port_offset + MAC_REG_AIDATIM);
 	}
 
 	/* Set AutoSleep */
-	MACvRegBitsOn(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
 
 	/* Set HWUTSF */
-	MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
 
 	if (wListenInterval >= 2) {
 		/* clear always listen beacon */
-		MACvRegBitsOff(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
 		/* first time set listen next beacon */
-		MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
+		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
 	} else {
 		/* always listen beacon */
-		MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
 	}
 
 	/* enable power saving hw function */
-	MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
+	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
 	priv->bEnablePSMode = true;
 
 	priv->bPWBitOn = true;
@@ -98,13 +98,13 @@ void PSvDisablePowerSaving(struct vnt_private *priv)
 	MACbPSWakeup(priv);
 
 	/* clear AutoSleep */
-	MACvRegBitsOff(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+	vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
 
 	/* clear HWUTSF */
-	MACvRegBitsOff(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+	vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
 
 	/* set always listen beacon */
-	MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
 
 	priv->bEnablePSMode = false;
 
@@ -135,8 +135,7 @@ bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
 
 		if (priv->wake_up_count == 1) {
 			/* Turn on wake up to listen next beacon */
-			MACvRegBitsOn(priv->port_offset,
-				      MAC_REG_PSCTL, PSCTL_LNBCN);
+			vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
 			wake_up = true;
 		}
 	}
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index ee5e2e0d9a8c..1fadc2fc4412 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -171,7 +171,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
 	unsigned short ww;
 	unsigned long dwValue;
 
-	VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
+	iowrite32((u32)dwData, iobase + MAC_REG_IFREGCTL);
 
 	/* W_MAX_TIMEOUT is the timeout period */
 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
@@ -209,10 +209,10 @@ static bool RFbAL2230Init(struct vnt_private *priv)
 
 	/* 3-wire control for normal mode */
 	iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);
-	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
-						       SOFTPWRCTL_TXPEINV));
+	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL,
+				    (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV));
 	/* PLL Off */
-	MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	vt6655_mac_word_reg_bits_off(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* patch abnormal AL2230 frequency output */
 	IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
@@ -222,7 +222,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
 	MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
 
 	/* PLL On */
-	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	MACvTimer0MicroSDelay(priv, 150);/* 150us */
 	ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
@@ -232,10 +232,10 @@ static bool RFbAL2230Init(struct vnt_private *priv)
 
 	ret &= IFRFbWriteEmbedded(priv, al2230_init_table[CB_AL2230_INIT_SEQ - 1]);
 
-	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
-						       SOFTPWRCTL_SWPE2 |
-						       SOFTPWRCTL_SWPECTI |
-						       SOFTPWRCTL_TXPEINV));
+	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+								  SOFTPWRCTL_SWPE2 |
+								  SOFTPWRCTL_SWPECTI |
+								  SOFTPWRCTL_TXPEINV));
 
 	/* 3-wire control for power saving mode */
 	iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);
@@ -350,7 +350,7 @@ bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type,
 	unsigned char sleep_count = 0;
 	unsigned short idx = MISCFIFO_SYNDATA_IDX;
 
-	VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
+	iowrite16(0, iobase + MAC_REG_MISCFFNDEX);
 	switch (rf_type) {
 	case RF_AIROHA:
 	case RF_AL2230S:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 71cbfa607d96..5bdb5176772c 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1420,11 +1420,11 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
 
 	priv->wBCNBufLen = sizeof(*short_head) + skb->len;
 
-	MACvSetCurrBCNTxDescAddr(priv->port_offset, priv->tx_beacon_dma);
+	iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
 
-	MACvSetCurrBCNLength(priv->port_offset, priv->wBCNBufLen);
+	iowrite16(priv->wBCNBufLen, priv->port_offset + MAC_REG_BCNDMACTL + 2);
 	/* Set auto Transmit on */
-	MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
 
 	/* Poll Transmit the adapter */
 	iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);
@@ -1435,7 +1435,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
 {
 	struct sk_buff *beacon;
 
-	beacon = ieee80211_beacon_get(priv->hw, vif);
+	beacon = ieee80211_beacon_get(priv->hw, vif, 0);
 	if (!beacon)
 		return -ENOMEM;
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 722a2cc9a473..ee5ca4db74dc 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -27,7 +27,7 @@
  *
  */
 
-#include "upc.h"
+#include "device.h"
 #include "mac.h"
 #include "srom.h"
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
deleted file mode 100644
index 2a47f5782b71..000000000000
--- a/drivers/staging/vt6655/upc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * Purpose: Macros to access device
- *
- * Author: Tevin Chen
- *
- * Date: Mar 17, 1997
- *
- */
-
-#ifndef __UPC_H__
-#define __UPC_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/* For memory mapped IO */
-
-#define VNSvOutPortW(dwIOAddress, wData) \
-	iowrite16((u16)(wData), dwIOAddress)
-
-#define VNSvOutPortD(dwIOAddress, dwData) \
-	iowrite32((u32)(dwData), dwIOAddress)
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-#endif /* __UPC_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ae7f5916d4d6..897ee0f7fc6b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -745,11 +745,11 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
 
 static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
-				 struct ieee80211_bss_conf *conf, u32 changed)
+				 struct ieee80211_bss_conf *conf, u64 changed)
 {
 	struct vnt_private *priv = hw->priv;
 
-	priv->current_aid = conf->aid;
+	priv->current_aid = vif->cfg.aid;
 
 	if (changed & BSS_CHANGED_BSSID && conf->bssid)
 		vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
@@ -811,7 +811,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
 	    priv->op_mode != NL80211_IFTYPE_AP) {
-		if (conf->assoc && conf->beacon_rate) {
+		if (vif->cfg.assoc && conf->beacon_rate) {
 			u16 ps_beacon_int = conf->beacon_int;
 
 			if (conf->dtim_period)
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 4d29f8ebb393..cd99091c6c28 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -699,7 +699,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
 {
 	struct sk_buff *beacon;
 
-	beacon = ieee80211_beacon_get(priv->hw, vif);
+	beacon = ieee80211_beacon_get(priv->hw, vif, 0);
 	if (!beacon)
 		return -ENOMEM;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 87379edce9a8..b7b56d8406d1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -645,7 +645,7 @@ void prism2_disconnected(struct wlandevice *wlandev)
 void prism2_roamed(struct wlandevice *wlandev)
 {
 	struct cfg80211_roam_info roam_info = {
-		.bssid = wlandev->bssid,
+		.links[0].bssid = wlandev->bssid,
 	};
 
 	cfg80211_roamed(wlandev->netdev, &roam_info, GFP_KERNEL);
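Note: the mac.h hunk above only adds declarations for the four vt6655_mac_*_reg_bits_* helpers that replace the old MACvRegBitsOn()/MACvWordRegBitsOff() macros; their definitions (expected in drivers/staging/vt6655/mac.c) are not part of this excerpt. As a minimal sketch, assuming the usual read-modify-write pattern over the generic MMIO accessors from <linux/io.h>, such helpers would look roughly like this (illustrative only, not the committed code):

/*
 * Sketch only: read-modify-write helpers matching the prototypes
 * declared in mac.h above. The real bodies live in mac.c, which is
 * not shown in this excerpt.
 */
#include <linux/io.h>
#include <linux/types.h>

void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
{
	unsigned char reg_value;

	/* set the requested bits in an 8-bit MAC register */
	reg_value = ioread8(iobase + reg_offset);
	iowrite8(reg_value | bit_mask, iobase + reg_offset);
}

void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
{
	unsigned short reg_value;

	/* set the requested bits in a 16-bit MAC register */
	reg_value = ioread16(iobase + reg_offset);
	iowrite16(reg_value | bit_mask, iobase + reg_offset);
}

void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
{
	unsigned char reg_value;

	/* clear the requested bits in an 8-bit MAC register */
	reg_value = ioread8(iobase + reg_offset);
	iowrite8(reg_value & ~bit_mask, iobase + reg_offset);
}

void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
{
	unsigned short reg_value;

	/* clear the requested bits in a 16-bit MAC register */
	reg_value = ioread16(iobase + reg_offset);
	iowrite16(reg_value & ~bit_mask, iobase + reg_offset);
}

Turning the macros into functions of this shape gives type checking on the offset and mask arguments and avoids evaluating macro arguments more than once, in line with the direct iowrite16()/iowrite32() conversions made at the other call sites in this diff.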