Diffstat (limited to 'drivers'): 472 files changed, 19824 insertions, 7056 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index 117ca14ccf85..ba2901e76769 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -204,4 +204,6 @@ source "drivers/fpga/Kconfig" source "drivers/fsi/Kconfig" +source "drivers/tee/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index edba1edc6654..cfabd141dba2 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -180,3 +180,4 @@ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_NVMEM) += nvmem/ obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FSI) += fsi/ +obj-$(CONFIG_TEE) += tee/ diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index d78065cc9324..b1aacfc62b1f 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o acpi-y += sysfs.o acpi-y += property.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o +acpi-$(CONFIG_X86) += x86/utils.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 17a1eb14847a..fc6c416f8724 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -106,6 +106,16 @@ static const struct apd_device_desc vulcan_spi_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 133000000, }; + +static const struct apd_device_desc hip07_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 200000000, +}; + +static const struct apd_device_desc hip08_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 250000000, +}; #endif #else @@ -170,6 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, { "CAV900D", APD_ADDR(vulcan_spi_desc) }, + { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, + { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 5edfd9c49044..10347e3d73ad 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -143,6 +143,22 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata) writel(val, pdata->mmio_base + offset); } +/* + * BYT PWM used for backlight control by the i915 driver on systems without + * the Crystal Cove PMIC. 
+ */ +static struct pwm_lookup byt_pwm_lookup[] = { + PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0", + "pwm_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), +}; + +static void byt_pwm_setup(struct lpss_private_data *pdata) +{ + if (!acpi_dev_present("INT33FD", NULL, -1)) + pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); +} + #define LPSS_I2C_ENABLE 0x6c static void byt_i2c_setup(struct lpss_private_data *pdata) @@ -200,6 +216,7 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, + .setup = byt_pwm_setup, }; static const struct lpss_device_desc bsw_pwm_dev_desc = { diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 32d93edbc479..dea65306b687 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -2,7 +2,7 @@ # Makefile for ACPICA Core interpreter # -ccflags-y := -Os -DBUILDING_ACPICA +ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # use acpi.o to put all files here into acpi.o modparam namespace diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h new file mode 100644 index 000000000000..c84223b60b35 --- /dev/null +++ b/drivers/acpi/acpica/acconvert.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * Module Name: acapps - common include for ACPI applications/tools + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef _ACCONVERT +#define _ACCONVERT + +/* Definitions for comment state */ + +#define ASL_COMMENT_STANDARD 1 +#define ASLCOMMENT_INLINE 2 +#define ASL_COMMENT_OPEN_PAREN 3 +#define ASL_COMMENT_CLOSE_PAREN 4 +#define ASL_COMMENT_CLOSE_BRACE 5 + +/* Definitions for comment print function*/ + +#define AML_COMMENT_STANDARD 1 +#define AMLCOMMENT_INLINE 2 +#define AML_COMMENT_END_NODE 3 +#define AML_NAMECOMMENT 4 +#define AML_COMMENT_CLOSE_BRACE 5 +#define AML_COMMENT_ENDBLK 6 +#define AML_COMMENT_INCLUDE 7 + +#ifdef ACPI_ASL_COMPILER +/* + * cvcompiler + */ +void +cv_process_comment(struct asl_comment_state current_state, + char *string_buffer, int c1); + +void +cv_process_comment_type2(struct asl_comment_state current_state, + char *string_buffer); + +u32 cv_calculate_comment_lengths(union acpi_parse_object *op); + +void cv_process_comment_state(char input); + +char *cv_append_inline_comment(char *inline_comment, char *to_add); + +void cv_add_to_comment_list(char *to_add); + +void cv_place_comment(u8 type, char *comment_string); + +u32 cv_parse_op_block_type(union acpi_parse_object *op); + +struct acpi_comment_node *cv_comment_node_calloc(void); + +void cg_write_aml_def_block_comment(union acpi_parse_object *op); + +void +cg_write_one_aml_comment(union acpi_parse_object *op, + char *comment_to_print, u8 input_option); + +void cg_write_aml_comment(union acpi_parse_object *op); + +/* + * cvparser + */ +void +cv_init_file_tree(struct acpi_table_header *table, + u8 *aml_start, u32 aml_length); + +void cv_clear_op_comments(union acpi_parse_object *op); + +struct acpi_file_node *cv_filename_exists(char *filename, + struct acpi_file_node *head); + +void cv_label_file_node(union acpi_parse_object *op); + +void +cv_capture_list_comments(struct acpi_parse_state *parser_state, + struct acpi_comment_node *list_head, + struct acpi_comment_node *list_tail); + +void cv_capture_comments_only(struct acpi_parse_state *parser_state); + +void cv_capture_comments(struct acpi_walk_state *walk_state); + +void cv_transfer_comments(union acpi_parse_object *op); + +/* + * cvdisasm + */ +void cv_switch_files(u32 level, union acpi_parse_object *op); + +u8 cv_file_has_switched(union acpi_parse_object *op); + +void cv_close_paren_write_comment(union acpi_parse_object *op, u32 level); + +void cv_close_brace_write_comment(union acpi_parse_object *op, u32 level); + +void +cv_print_one_comment_list(struct acpi_comment_node *comment_list, u32 level); + +void +cv_print_one_comment_type(union acpi_parse_object *op, + u8 comment_type, char *end_str, u32 level); + +#endif + +#endif /* _ACCONVERT */ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 1d955fe216c4..abe8c316908c 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -370,6 +370,59 @@ ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]); #endif +/* + * Meant for the -ca option. 
+ */ +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_close_brace_comment, NULL); + +ACPI_INIT_GLOBAL(char *, acpi_gbl_root_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_addr_node, + *acpi_gbl_comment_addr_list_head, NULL); + +ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL); + +ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL); + +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache); +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache); +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache); + +ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE); + +ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE); +ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL); + +ACPI_GLOBAL(char, acpi_gbl_table_sig[4]); + /***************************************************************************** * * Application globals diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 8fd495e8fdce..f9b3f7fef462 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle; /* Total number of aml opcodes defined */ -#define AML_NUM_OPCODES 0x82 +#define AML_NUM_OPCODES 0x83 /* Forward declarations */ @@ -754,21 +754,52 @@ union acpi_parse_value { #define ACPI_DISASM_ONLY_MEMBERS(a) #endif +#if defined(ACPI_ASL_COMPILER) +#define ACPI_CONVERTER_ONLY_MEMBERS(a) a; +#else +#define ACPI_CONVERTER_ONLY_MEMBERS(a) +#endif + #define ACPI_PARSE_COMMON \ - union acpi_parse_object *parent; /* Parent op */\ - u8 descriptor_type; /* To differentiate various internal objs */\ - u8 flags; /* Type of Op */\ - u16 aml_opcode; /* AML opcode */\ - u8 *aml; /* Address of declaration in AML */\ - union acpi_parse_object *next; /* Next op */\ - struct acpi_namespace_node *node; /* For use by interpreter */\ - union acpi_parse_value value; /* Value or args associated with the opcode */\ - u8 arg_list_length; /* Number of elements in the arg list */\ - ACPI_DISASM_ONLY_MEMBERS (\ - u16 disasm_flags; /* Used during AML disassembly */\ - u8 disasm_opcode; /* Subtype used for disassembly */\ - char *operator_symbol;/* Used for C-style operator name strings */\ - char aml_op_name[16]) /* Op name (debug only) */ + union acpi_parse_object *parent; /* Parent op */\ + u8 descriptor_type; /* To differentiate various internal 
objs */\ + u8 flags; /* Type of Op */\ + u16 aml_opcode; /* AML opcode */\ + u8 *aml; /* Address of declaration in AML */\ + union acpi_parse_object *next; /* Next op */\ + struct acpi_namespace_node *node; /* For use by interpreter */\ + union acpi_parse_value value; /* Value or args associated with the opcode */\ + u8 arg_list_length; /* Number of elements in the arg list */\ + ACPI_DISASM_ONLY_MEMBERS (\ + u16 disasm_flags; /* Used during AML disassembly */\ + u8 disasm_opcode; /* Subtype used for disassembly */\ + char *operator_symbol; /* Used for C-style operator name strings */\ + char aml_op_name[16]) /* Op name (debug only) */\ + ACPI_CONVERTER_ONLY_MEMBERS (\ + char *inline_comment; /* Inline comment */\ + char *end_node_comment; /* End of node comment */\ + char *name_comment; /* Comment associated with the first parameter of the name node */\ + char *close_brace_comment; /* Comments that come after } on the same as } */\ + struct acpi_comment_node *comment_list; /* comments that appears before this node */\ + struct acpi_comment_node *end_blk_comment; /* comments that at the end of a block but before ) or } */\ + char *cv_filename; /* Filename associated with this node. Used for ASL/ASL+ converter */\ + char *cv_parent_filename) /* Parent filename associated with this node. Used for ASL/ASL+ converter */ + +/* categories of comments */ + +typedef enum { + STANDARD_COMMENT = 1, + INLINE_COMMENT, + ENDNODE_COMMENT, + OPENBRACE_COMMENT, + CLOSE_BRACE_COMMENT, + STD_DEFBLK_COMMENT, + END_DEFBLK_COMMENT, + FILENAME_COMMENT, + PARENTFILENAME_COMMENT, + ENDBLK_COMMENT, + INCLUDE_COMMENT +} asl_comment_types; /* Internal opcodes for disasm_opcode field above */ @@ -784,9 +815,38 @@ union acpi_parse_value { #define ACPI_DASM_LNOT_SUFFIX 0x09 /* End of a Lnot_equal (etc.) pair of opcodes */ #define ACPI_DASM_HID_STRING 0x0A /* String is a _HID or _CID */ #define ACPI_DASM_IGNORE_SINGLE 0x0B /* Ignore the opcode but not it's children */ -#define ACPI_DASM_SWITCH_PREDICATE 0x0C /* Object is a predicate for a Switch or Case block */ -#define ACPI_DASM_CASE 0x0D /* If/Else is a Case in a Switch/Case block */ -#define ACPI_DASM_DEFAULT 0x0E /* Else is a Default in a Switch/Case block */ +#define ACPI_DASM_SWITCH 0x0C /* While is a Switch */ +#define ACPI_DASM_SWITCH_PREDICATE 0x0D /* Object is a predicate for a Switch or Case block */ +#define ACPI_DASM_CASE 0x0E /* If/Else is a Case in a Switch/Case block */ +#define ACPI_DASM_DEFAULT 0x0F /* Else is a Default in a Switch/Case block */ + +/* + * List struct used in the -ca option + */ +struct acpi_comment_node { + char *comment; + struct acpi_comment_node *next; +}; + +struct acpi_comment_addr_node { + u8 *addr; + struct acpi_comment_addr_node *next; +}; + +/* + * File node - used for "Include" operator file stack and + * depdendency tree for the -ca option + */ +struct acpi_file_node { + void *file; + char *filename; + char *file_start; /* Points to AML and indicates when the AML for this particular file starts. */ + char *file_end; /* Points to AML and indicates when the AML for this particular file ends. 
*/ + struct acpi_file_node *next; + struct acpi_file_node *parent; + u8 include_written; + struct acpi_comment_node *include_comment; +}; /* * Generic operation (for example: If, While, Store) @@ -813,6 +873,8 @@ struct acpi_parse_obj_asl { ACPI_PARSE_COMMON union acpi_parse_object *child; union acpi_parse_object *parent_method; char *filename; + u8 file_changed; + char *parent_filename; char *external_name; char *namepath; char name_seg[4]; @@ -842,6 +904,14 @@ union acpi_parse_object { struct acpi_parse_obj_asl asl; }; +struct asl_comment_state { + u8 comment_type; + u32 spaces_before; + union acpi_parse_object *latest_parse_node; + union acpi_parse_object *parsing_paren_brace_node; + u8 capture_comments; +}; + /* * Parse state - one state per parser invocation and each control * method. diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index c3337514e0ed..c7f0c96cc00f 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -493,4 +493,39 @@ #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) +/* + * Macors used for the ASL-/ASL+ converter utility + */ +#ifdef ACPI_ASL_COMPILER + +#define ASL_CV_LABEL_FILENODE(a) cv_label_file_node(a); +#define ASL_CV_CAPTURE_COMMENTS_ONLY(a) cv_capture_comments_only (a); +#define ASL_CV_CAPTURE_COMMENTS(a) cv_capture_comments (a); +#define ASL_CV_TRANSFER_COMMENTS(a) cv_transfer_comments (a); +#define ASL_CV_CLOSE_PAREN(a,b) cv_close_paren_write_comment(a,b); +#define ASL_CV_CLOSE_BRACE(a,b) cv_close_brace_write_comment(a,b); +#define ASL_CV_SWITCH_FILES(a,b) cv_switch_files(a,b); +#define ASL_CV_CLEAR_OP_COMMENTS(a) cv_clear_op_comments(a); +#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d); +#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b); +#define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a) +#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c); + +#else + +#define ASL_CV_LABEL_FILENODE(a) +#define ASL_CV_CAPTURE_COMMENTS_ONLY(a) +#define ASL_CV_CAPTURE_COMMENTS(a) +#define ASL_CV_TRANSFER_COMMENTS(a) +#define ASL_CV_CLOSE_PAREN(a,b) acpi_os_printf (")"); +#define ASL_CV_CLOSE_BRACE(a,b) acpi_os_printf ("}"); +#define ASL_CV_SWITCH_FILES(a,b) +#define ASL_CV_CLEAR_OP_COMMENTS(a) +#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) +#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) +#define ASL_CV_FILE_HAS_SWITCHED(a) 0 +#define ASL_CV_INIT_FILETREE(a,b,c) + +#endif + #endif /* ACMACROS_H */ diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index e758f098ff4b..a5d9af758c52 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -90,6 +90,7 @@ #define ARGP_BUFFER_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_BYTELIST) #define ARGP_BYTE_OP ARGP_LIST1 (ARGP_BYTEDATA) #define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING) +#define ARGP_COMMENT_OP ARGP_LIST2 (ARGP_BYTEDATA, ARGP_COMMENT) #define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SIMPLENAME, ARGP_TARGET) @@ -223,6 +224,7 @@ #define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_BYTE_OP ARGI_INVALID_OPCODE #define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE +#define ARGI_COMMENT_OP ARGI_INVALID_OPCODE #define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_ANYTYPE, ARGI_ANYTYPE, ARGI_TARGETREF) #define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF) #define 
ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF) diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index b536fd471292..176f7e9b4d0e 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -48,11 +48,8 @@ /* primary opcodes */ -#define AML_NULL_CHAR (u16) 0x00 - #define AML_ZERO_OP (u16) 0x00 #define AML_ONE_OP (u16) 0x01 -#define AML_UNASSIGNED (u16) 0x02 #define AML_ALIAS_OP (u16) 0x06 #define AML_NAME_OP (u16) 0x08 #define AML_BYTE_OP (u16) 0x0a @@ -63,17 +60,15 @@ #define AML_SCOPE_OP (u16) 0x10 #define AML_BUFFER_OP (u16) 0x11 #define AML_PACKAGE_OP (u16) 0x12 -#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */ +#define AML_VARIABLE_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */ #define AML_METHOD_OP (u16) 0x14 #define AML_EXTERNAL_OP (u16) 0x15 /* ACPI 6.0 */ #define AML_DUAL_NAME_PREFIX (u16) 0x2e -#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f -#define AML_NAME_CHAR_SUBSEQ (u16) 0x30 -#define AML_NAME_CHAR_FIRST (u16) 0x41 -#define AML_EXTENDED_OP_PREFIX (u16) 0x5b +#define AML_MULTI_NAME_PREFIX (u16) 0x2f +#define AML_EXTENDED_PREFIX (u16) 0x5b #define AML_ROOT_PREFIX (u16) 0x5c #define AML_PARENT_PREFIX (u16) 0x5e -#define AML_LOCAL_OP (u16) 0x60 +#define AML_FIRST_LOCAL_OP (u16) 0x60 /* Used for Local op # calculations */ #define AML_LOCAL0 (u16) 0x60 #define AML_LOCAL1 (u16) 0x61 #define AML_LOCAL2 (u16) 0x62 @@ -82,7 +77,7 @@ #define AML_LOCAL5 (u16) 0x65 #define AML_LOCAL6 (u16) 0x66 #define AML_LOCAL7 (u16) 0x67 -#define AML_ARG_OP (u16) 0x68 +#define AML_FIRST_ARG_OP (u16) 0x68 /* Used for Arg op # calculations */ #define AML_ARG0 (u16) 0x68 #define AML_ARG1 (u16) 0x69 #define AML_ARG2 (u16) 0x6a @@ -93,7 +88,7 @@ #define AML_STORE_OP (u16) 0x70 #define AML_REF_OF_OP (u16) 0x71 #define AML_ADD_OP (u16) 0x72 -#define AML_CONCAT_OP (u16) 0x73 +#define AML_CONCATENATE_OP (u16) 0x73 #define AML_SUBTRACT_OP (u16) 0x74 #define AML_INCREMENT_OP (u16) 0x75 #define AML_DECREMENT_OP (u16) 0x76 @@ -110,7 +105,7 @@ #define AML_FIND_SET_LEFT_BIT_OP (u16) 0x81 #define AML_FIND_SET_RIGHT_BIT_OP (u16) 0x82 #define AML_DEREF_OF_OP (u16) 0x83 -#define AML_CONCAT_RES_OP (u16) 0x84 /* ACPI 2.0 */ +#define AML_CONCATENATE_TEMPLATE_OP (u16) 0x84 /* ACPI 2.0 */ #define AML_MOD_OP (u16) 0x85 /* ACPI 2.0 */ #define AML_NOTIFY_OP (u16) 0x86 #define AML_SIZE_OF_OP (u16) 0x87 @@ -122,18 +117,18 @@ #define AML_CREATE_BIT_FIELD_OP (u16) 0x8d #define AML_OBJECT_TYPE_OP (u16) 0x8e #define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */ -#define AML_LAND_OP (u16) 0x90 -#define AML_LOR_OP (u16) 0x91 -#define AML_LNOT_OP (u16) 0x92 -#define AML_LEQUAL_OP (u16) 0x93 -#define AML_LGREATER_OP (u16) 0x94 -#define AML_LLESS_OP (u16) 0x95 +#define AML_LOGICAL_AND_OP (u16) 0x90 +#define AML_LOGICAL_OR_OP (u16) 0x91 +#define AML_LOGICAL_NOT_OP (u16) 0x92 +#define AML_LOGICAL_EQUAL_OP (u16) 0x93 +#define AML_LOGICAL_GREATER_OP (u16) 0x94 +#define AML_LOGICAL_LESS_OP (u16) 0x95 #define AML_TO_BUFFER_OP (u16) 0x96 /* ACPI 2.0 */ -#define AML_TO_DECSTRING_OP (u16) 0x97 /* ACPI 2.0 */ -#define AML_TO_HEXSTRING_OP (u16) 0x98 /* ACPI 2.0 */ +#define AML_TO_DECIMAL_STRING_OP (u16) 0x97 /* ACPI 2.0 */ +#define AML_TO_HEX_STRING_OP (u16) 0x98 /* ACPI 2.0 */ #define AML_TO_INTEGER_OP (u16) 0x99 /* ACPI 2.0 */ #define AML_TO_STRING_OP (u16) 0x9c /* ACPI 2.0 */ -#define AML_COPY_OP (u16) 0x9d /* ACPI 2.0 */ +#define AML_COPY_OBJECT_OP (u16) 0x9d /* ACPI 2.0 */ #define AML_MID_OP (u16) 0x9e /* ACPI 2.0 */ #define AML_CONTINUE_OP (u16) 0x9f /* ACPI 2.0 */ #define AML_IF_OP 
(u16) 0xa0 @@ -142,18 +137,27 @@ #define AML_NOOP_OP (u16) 0xa3 #define AML_RETURN_OP (u16) 0xa4 #define AML_BREAK_OP (u16) 0xa5 -#define AML_BREAK_POINT_OP (u16) 0xcc +#define AML_COMMENT_OP (u16) 0xa9 +#define AML_BREAKPOINT_OP (u16) 0xcc #define AML_ONES_OP (u16) 0xff -/* prefixed opcodes */ +/* + * Combination opcodes (actually two one-byte opcodes) + * Used by the disassembler and iASL compiler + */ +#define AML_LOGICAL_GREATER_EQUAL_OP (u16) 0x9295 /* LNot (LLess) */ +#define AML_LOGICAL_LESS_EQUAL_OP (u16) 0x9294 /* LNot (LGreater) */ +#define AML_LOGICAL_NOT_EQUAL_OP (u16) 0x9293 /* LNot (LEqual) */ + +/* Prefixed (2-byte) opcodes (with AML_EXTENDED_PREFIX) */ -#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* prefix for 2-byte opcodes */ +#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* Prefix for 2-byte opcodes */ #define AML_MUTEX_OP (u16) 0x5b01 #define AML_EVENT_OP (u16) 0x5b02 -#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10 -#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11 -#define AML_COND_REF_OF_OP (u16) 0x5b12 +#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10 /* Obsolete, not in ACPI spec */ +#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11 /* Obsolete, not in ACPI spec */ +#define AML_CONDITIONAL_REF_OF_OP (u16) 0x5b12 #define AML_CREATE_FIELD_OP (u16) 0x5b13 #define AML_LOAD_TABLE_OP (u16) 0x5b1f /* ACPI 2.0 */ #define AML_LOAD_OP (u16) 0x5b20 @@ -175,21 +179,13 @@ #define AML_FIELD_OP (u16) 0x5b81 #define AML_DEVICE_OP (u16) 0x5b82 #define AML_PROCESSOR_OP (u16) 0x5b83 -#define AML_POWER_RES_OP (u16) 0x5b84 +#define AML_POWER_RESOURCE_OP (u16) 0x5b84 #define AML_THERMAL_ZONE_OP (u16) 0x5b85 #define AML_INDEX_FIELD_OP (u16) 0x5b86 #define AML_BANK_FIELD_OP (u16) 0x5b87 #define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */ /* - * Combination opcodes (actually two one-byte opcodes) - * Used by the disassembler and iASL compiler - */ -#define AML_LGREATEREQUAL_OP (u16) 0x9295 -#define AML_LLESSEQUAL_OP (u16) 0x9294 -#define AML_LNOTEQUAL_OP (u16) 0x9293 - -/* * Opcodes for "Field" operators */ #define AML_FIELD_OFFSET_OP (u8) 0x00 @@ -241,6 +237,7 @@ #define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */ #define ARGP_NAME_OR_REF 0x13 /* For object_type only */ #define ARGP_MAX 0x13 +#define ARGP_COMMENT 0x14 /* * Resolved argument types for the AML Interpreter @@ -308,24 +305,19 @@ #define ARGI_INVALID_OPCODE 0xFFFFFFFF /* - * hash offsets - */ -#define AML_EXTOP_HASH_OFFSET 22 -#define AML_LNOT_HASH_OFFSET 19 - -/* - * opcode groups and types + * Some of the flags and types below are of the form: + * + * AML_FLAGS_EXEC_#A_#T,#R, or + * AML_TYPE_EXEC_#A_#T,#R where: + * + * #A is the number of required arguments + * #T is the number of target operands + * #R indicates whether there is a return value */ -#define OPGRP_NAMED 0x01 -#define OPGRP_FIELD 0x02 -#define OPGRP_BYTELIST 0x04 /* - * Opcode information + * Opcode information flags */ - -/* Opcode flags */ - #define AML_LOGICAL 0x0001 #define AML_LOGICAL_NUMERIC 0x0002 #define AML_MATH 0x0004 @@ -342,7 +334,7 @@ #define AML_CONSTANT 0x2000 #define AML_NO_OPERAND_RESOLVE 0x4000 -/* Convenient flag groupings */ +/* Convenient flag groupings of the flags above */ #define AML_FLAGS_EXEC_0A_0T_1R AML_HAS_RETVAL #define AML_FLAGS_EXEC_1A_0T_0R AML_HAS_ARGS /* Monadic1 */ @@ -359,7 +351,7 @@ /* * The opcode Type is used in a dispatch table, do not change - * without updating the table. + * or add anything new without updating the table. 
*/ #define AML_TYPE_EXEC_0A_0T_1R 0x00 #define AML_TYPE_EXEC_1A_0T_0R 0x01 /* Monadic1 */ @@ -385,7 +377,7 @@ #define AML_TYPE_METHOD_CALL 0x10 -/* Misc */ +/* Miscellaneous types */ #define AML_TYPE_CREATE_FIELD 0x11 #define AML_TYPE_CREATE_OBJECT 0x12 @@ -395,7 +387,6 @@ #define AML_TYPE_NAMED_SIMPLE 0x16 #define AML_TYPE_NAMED_COMPLEX 0x17 #define AML_TYPE_RETURN 0x18 - #define AML_TYPE_UNDEFINED 0x19 #define AML_TYPE_BOGUS 0x1A diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c index 15c8237b8a80..df62c9245efc 100644 --- a/drivers/acpi/acpica/dbmethod.c +++ b/drivers/acpi/acpica/dbmethod.c @@ -422,6 +422,7 @@ acpi_db_walk_for_execute(acpi_handle obj_handle, status = acpi_get_object_info(obj_handle, &obj_info); if (ACPI_FAILURE(status)) { + ACPI_FREE(pathname); return (status); } diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c index 205b8e0eded5..8f665d94b8b5 100644 --- a/drivers/acpi/acpica/dbxface.c +++ b/drivers/acpi/acpica/dbxface.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "amlcode.h" #include "acdebug.h" +#include "acinterp.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbxface") @@ -125,7 +126,7 @@ error_exit: * * RETURN: Status * - * DESCRIPTION: Called for AML_BREAK_POINT_OP + * DESCRIPTION: Called for AML_BREAKPOINT_OP * ******************************************************************************/ @@ -368,7 +369,9 @@ acpi_db_single_step(struct acpi_walk_state *walk_state, walk_state->method_breakpoint = 1; /* Must be non-zero! */ } + acpi_ex_exit_interpreter(); status = acpi_db_start_command(walk_state, op); + acpi_ex_enter_interpreter(); /* User commands complete, continue execution of the interrupted method */ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index d31b49feaa79..f470e81b0499 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -347,7 +347,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state, break; - case AML_BREAK_POINT_OP: + case AML_BREAKPOINT_OP: acpi_db_signal_break_point(walk_state); diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index adcc72cd53a7..27a7de95f7b0 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -672,7 +672,8 @@ acpi_ds_store_object_to_local(u8 type, * * FUNCTION: acpi_ds_method_data_get_type * - * PARAMETERS: opcode - Either AML_LOCAL_OP or AML_ARG_OP + * PARAMETERS: opcode - Either AML_FIRST LOCAL_OP or + * AML_FIRST_ARG_OP * index - Which Local or Arg whose type to get * walk_state - Current walk state object * diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 8deaa16493a0..7df3152ed856 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -114,7 +114,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == - AML_VAR_PACKAGE_OP))) { + AML_VARIABLE_PACKAGE_OP))) { /* * We didn't find the target and we are populating elements * of a package - ignore if slack enabled. Some ASL code @@ -144,7 +144,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == - AML_VAR_PACKAGE_OP)) { + AML_VARIABLE_PACKAGE_OP)) { /* * Attempt to resolve the node to a value before we insert it into * the package. 
If this is a reference to a common data type, @@ -398,7 +398,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, parent = op->common.parent; while ((parent->common.aml_opcode == AML_PACKAGE_OP) || - (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) { + (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) { parent = parent->common.parent; } @@ -769,10 +769,10 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, switch (op_info->type) { case AML_TYPE_LOCAL_VARIABLE: - /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */ + /* Local ID (0-7) is (AML opcode - base AML_FIRST_LOCAL_OP) */ obj_desc->reference.value = - ((u32)opcode) - AML_LOCAL_OP; + ((u32)opcode) - AML_FIRST_LOCAL_OP; obj_desc->reference.class = ACPI_REFCLASS_LOCAL; #ifndef ACPI_NO_METHOD_EXECUTION @@ -790,9 +790,10 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, case AML_TYPE_METHOD_ARGUMENT: - /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */ + /* Arg ID (0-6) is (AML opcode - base AML_FIRST_ARG_OP) */ - obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP; + obj_desc->reference.value = + ((u32)opcode) - AML_FIRST_ARG_OP; obj_desc->reference.class = ACPI_REFCLASS_ARG; #ifndef ACPI_NO_METHOD_EXECUTION diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 148523205d41..9a8f8a992b3e 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -639,7 +639,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, break; case AML_PACKAGE_OP: - case AML_VAR_PACKAGE_OP: + case AML_VARIABLE_PACKAGE_OP: status = acpi_ds_build_internal_package_obj(walk_state, op, length, @@ -660,7 +660,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, if ((!op->common.parent) || ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && (op->common.parent->common.aml_opcode != - AML_VAR_PACKAGE_OP) + AML_VARIABLE_PACKAGE_OP) && (op->common.parent->common.aml_opcode != AML_NAME_OP))) { walk_state->result_obj = obj_desc; diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 049fbab4e5a6..406edec20de7 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -275,10 +275,10 @@ acpi_ds_is_result_used(union acpi_parse_object * op, if ((op->common.parent->common.aml_opcode == AML_REGION_OP) || (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP) || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP) - || (op->common.parent->common.aml_opcode == - AML_VAR_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_BUFFER_OP) || (op->common.parent->common.aml_opcode == + AML_VARIABLE_PACKAGE_OP) + || (op->common.parent->common.aml_opcode == AML_INT_EVAL_SUBTREE_OP) || (op->common.parent->common.aml_opcode == AML_BANK_FIELD_OP)) { @@ -551,7 +551,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, */ if (status == AE_NOT_FOUND) { if (parent_op->common.aml_opcode == - AML_COND_REF_OF_OP) { + AML_CONDITIONAL_REF_OF_OP) { /* * For the Conditional Reference op, it's OK if * the name is not found; We just need a way to @@ -806,7 +806,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state) } if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || - (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) { /* TBD: Should we specify this feature as a bit of 
op_info->Flags of these opcodes? */ diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 78f8e6a4f72f..a2ff8ad70d58 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -497,7 +497,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) if ((op->asl.parent) && ((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP) || (op->asl.parent->asl.aml_opcode == - AML_VAR_PACKAGE_OP))) { + AML_VARIABLE_PACKAGE_OP))) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Method Reference in a Package, Op=%p\n", op)); diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 44d4553dfbdd..8d510c7e20c8 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -528,7 +528,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) status = acpi_ex_create_processor(walk_state); break; - case AML_POWER_RES_OP: + case AML_POWER_RESOURCE_OP: status = acpi_ex_create_power_resource(walk_state); break; diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 1a6f59079ea5..f222a80ca38e 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -249,14 +249,14 @@ acpi_ex_do_logical_numeric_op(u16 opcode, ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op); switch (opcode) { - case AML_LAND_OP: /* LAnd (Integer0, Integer1) */ + case AML_LOGICAL_AND_OP: /* LAnd (Integer0, Integer1) */ if (integer0 && integer1) { local_result = TRUE; } break; - case AML_LOR_OP: /* LOr (Integer0, Integer1) */ + case AML_LOGICAL_OR_OP: /* LOr (Integer0, Integer1) */ if (integer0 || integer1) { local_result = TRUE; @@ -365,21 +365,21 @@ acpi_ex_do_logical_op(u16 opcode, integer1 = local_operand1->integer.value; switch (opcode) { - case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ + case AML_LOGICAL_EQUAL_OP: /* LEqual (Operand0, Operand1) */ if (integer0 == integer1) { local_result = TRUE; } break; - case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ + case AML_LOGICAL_GREATER_OP: /* LGreater (Operand0, Operand1) */ if (integer0 > integer1) { local_result = TRUE; } break; - case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ + case AML_LOGICAL_LESS_OP: /* LLess (Operand0, Operand1) */ if (integer0 < integer1) { local_result = TRUE; @@ -408,7 +408,7 @@ acpi_ex_do_logical_op(u16 opcode, (length0 > length1) ? 
length1 : length0); switch (opcode) { - case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ + case AML_LOGICAL_EQUAL_OP: /* LEqual (Operand0, Operand1) */ /* Length and all bytes must be equal */ @@ -420,7 +420,7 @@ acpi_ex_do_logical_op(u16 opcode, } break; - case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ + case AML_LOGICAL_GREATER_OP: /* LGreater (Operand0, Operand1) */ if (compare > 0) { local_result = TRUE; @@ -437,7 +437,7 @@ acpi_ex_do_logical_op(u16 opcode, } break; - case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ + case AML_LOGICAL_LESS_OP: /* LLess (Operand0, Operand1) */ if (compare > 0) { goto cleanup; /* FALSE */ diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index ee7b62a86661..caa5ed1f65ec 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -122,7 +122,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs) /* Set up multi prefixes */ - *temp_ptr++ = AML_MULTI_NAME_PREFIX_OP; + *temp_ptr++ = AML_MULTI_NAME_PREFIX; *temp_ptr++ = (char)num_name_segs; } else if (2 == num_name_segs) { @@ -342,7 +342,7 @@ acpi_ex_get_name_string(acpi_object_type data_type, } break; - case AML_MULTI_NAME_PREFIX_OP: + case AML_MULTI_NAME_PREFIX: ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "MultiNamePrefix at %p\n", diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index af73fcde7e5c..e327349675cd 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -274,7 +274,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) case AML_FIND_SET_RIGHT_BIT_OP: case AML_FROM_BCD_OP: case AML_TO_BCD_OP: - case AML_COND_REF_OF_OP: + case AML_CONDITIONAL_REF_OF_OP: /* Create a return object of type Integer for these opcodes */ @@ -405,7 +405,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) } break; - case AML_COND_REF_OF_OP: /* cond_ref_of (source_object, Result) */ + case AML_CONDITIONAL_REF_OF_OP: /* cond_ref_of (source_object, Result) */ /* * This op is a little strange because the internal return value is * different than the return value stored in the result descriptor @@ -475,14 +475,14 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) /* * ACPI 2.0 Opcodes */ - case AML_COPY_OP: /* Copy (Source, Target) */ + case AML_COPY_OBJECT_OP: /* copy_object (Source, Target) */ status = acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc, walk_state); break; - case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */ + case AML_TO_DECIMAL_STRING_OP: /* to_decimal_string (Data, Result) */ status = acpi_ex_convert_to_string(operand[0], &return_desc, @@ -495,7 +495,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) } break; - case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */ + case AML_TO_HEX_STRING_OP: /* to_hex_string (Data, Result) */ status = acpi_ex_convert_to_string(operand[0], &return_desc, @@ -603,7 +603,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) /* Examine the AML opcode */ switch (walk_state->opcode) { - case AML_LNOT_OP: /* LNot (Operand) */ + case AML_LOGICAL_NOT_OP: /* LNot (Operand) */ return_desc = acpi_ut_create_integer_object((u64) 0); if (!return_desc) { @@ -652,9 +652,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) * NOTE: We use LNOT_OP here in order to force resolution of the * reference operand to an actual integer. 
*/ - status = - acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc, - walk_state); + status = acpi_ex_resolve_operands(AML_LOGICAL_NOT_OP, + &temp_desc, walk_state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While resolving operands for [%s]", diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 44ecba50c0da..eecb3bff7fd7 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -298,7 +298,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) NULL, &return_desc->integer.value); break; - case AML_CONCAT_OP: /* Concatenate (Data1, Data2, Result) */ + case AML_CONCATENATE_OP: /* Concatenate (Data1, Data2, Result) */ status = acpi_ex_do_concatenate(operand[0], operand[1], &return_desc, @@ -343,7 +343,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) operand[0]->buffer.pointer, length); break; - case AML_CONCAT_RES_OP: + case AML_CONCATENATE_TEMPLATE_OP: /* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 31e4df97cbe1..688032b58a21 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -124,8 +124,8 @@ acpi_ex_do_match(u32 match_op, * Change to: (M == P[i]) */ status = - acpi_ex_do_logical_op(AML_LEQUAL_OP, match_obj, package_obj, - &logical_result); + acpi_ex_do_logical_op(AML_LOGICAL_EQUAL_OP, match_obj, + package_obj, &logical_result); if (ACPI_FAILURE(status)) { return (FALSE); } @@ -137,8 +137,8 @@ acpi_ex_do_match(u32 match_op, * Change to: (M >= P[i]) (M not_less than P[i]) */ status = - acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj, - &logical_result); + acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj, + package_obj, &logical_result); if (ACPI_FAILURE(status)) { return (FALSE); } @@ -151,7 +151,7 @@ acpi_ex_do_match(u32 match_op, * Change to: (M > P[i]) */ status = - acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj, + acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj, package_obj, &logical_result); if (ACPI_FAILURE(status)) { return (FALSE); @@ -164,7 +164,7 @@ acpi_ex_do_match(u32 match_op, * Change to: (M <= P[i]) (M not_greater than P[i]) */ status = - acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj, + acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj, package_obj, &logical_result); if (ACPI_FAILURE(status)) { return (FALSE); @@ -178,8 +178,8 @@ acpi_ex_do_match(u32 match_op, * Change to: (M < P[i]) */ status = - acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj, - &logical_result); + acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj, + package_obj, &logical_result); if (ACPI_FAILURE(status)) { return (FALSE); } diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 7fecefc2e1b4..aa8c6fd74cc3 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -196,7 +196,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, if ((walk_state->opcode == AML_INT_METHODCALL_OP) - || (walk_state->opcode == AML_COPY_OP)) { + || (walk_state->opcode == + AML_COPY_OBJECT_OP)) { break; } diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index a2f8001aeb86..bdd43cde8f36 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -416,7 +416,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, /* Only limited target types possible for everything except copy_object */ - if 
(walk_state->opcode != AML_COPY_OP) { + if (walk_state->opcode != AML_COPY_OBJECT_OP) { /* * Only copy_object allows all object types to be overwritten. For * target_ref(s), there are restrictions on the object types that @@ -499,7 +499,8 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: - if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) { + if ((walk_state->opcode == AML_COPY_OBJECT_OP) || + !implicit_conversion) { /* * However, copy_object and Stores to arg_x do not perform * an implicit conversion, as per the ACPI specification. diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 85db4716a043..56f59cf5da29 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -107,7 +107,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, /* For copy_object, no further validation necessary */ - if (walk_state->opcode == AML_COPY_OP) { + if (walk_state->opcode == AML_COPY_OBJECT_OP) { break; } diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 531620abed80..3094cec4eab4 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -102,7 +102,7 @@ static const struct acpi_port_info acpi_protected_ports[] = { {"PCI", 0x0CF8, 0x0CFF, ACPI_OSI_WIN_XP} }; -#define ACPI_PORT_INFO_ENTRIES ACPI_ARRAY_LENGTH (acpi_protected_ports) +#define ACPI_PORT_INFO_ENTRIES ACPI_ARRAY_LENGTH (acpi_protected_ports) /****************************************************************************** * @@ -128,7 +128,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) acpi_io_address last_address; const struct acpi_port_info *port_info; - ACPI_FUNCTION_TRACE(hw_validate_io_request); + ACPI_FUNCTION_NAME(hw_validate_io_request); /* Supported widths are 8/16/32 */ @@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) ACPI_ERROR((AE_INFO, "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X", ACPI_FORMAT_UINT64(address), byte_width)); - return_ACPI_STATUS(AE_LIMIT); + return (AE_LIMIT); } /* Exit if requested address is not within the protected port table */ if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) { - return_ACPI_STATUS(AE_OK); + return (AE_OK); } /* Check request against the list of protected I/O ports */ @@ -167,7 +167,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) for (i = 0; i < ACPI_PORT_INFO_ENTRIES; i++, port_info++) { /* * Check if the requested address range will write to a reserved - * port. Four cases to consider: + * port. There are four cases to consider: * * 1) Address range is contained completely in the port address range * 2) Address range overlaps port range at the port range start @@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) } } - return_ACPI_STATUS(AE_OK); + return (AE_OK); } /****************************************************************************** @@ -206,7 +206,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) * FUNCTION: acpi_hw_read_port * * PARAMETERS: Address Address of I/O port/register to read - * Value Where value is placed + * Value Where value (data) is returned * Width Number of bits * * RETURN: Status and value read from port @@ -244,7 +244,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width) /* * There has been a protection violation within the request. 
Fall * back to byte granularity port I/O and ignore the failing bytes. - * This provides Windows compatibility. + * This provides compatibility with other ACPI implementations. */ for (i = 0, *value = 0; i < width; i += 8) { @@ -307,7 +307,7 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width) /* * There has been a protection violation within the request. Fall * back to byte granularity port I/O and ignore the failing bytes. - * This provides Windows compatibility. + * This provides compatibility with other ACPI implementations. */ for (i = 0; i < width; i += 8) { diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 498bb8f70e6b..fb265b5737de 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -485,7 +485,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, flags)); break; - case AML_MULTI_NAME_PREFIX_OP: + case AML_MULTI_NAME_PREFIX: /* More than one name_seg, search rules do not apply */ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 38316266521e..418ef2ac82ab 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -290,22 +290,12 @@ object_repaired: /* Object was successfully repaired */ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { - /* - * The original object is a package element. We need to - * decrement the reference count of the original object, - * for removing it from the package. - * - * However, if the original object was just wrapped with a - * package object as part of the repair, we don't need to - * change the reference count. - */ + + /* Update reference count of new object */ + if (!(info->return_flags & ACPI_OBJECT_WRAPPED)) { new_object->common.reference_count = return_object->common.reference_count; - - if (return_object->common.reference_count > 1) { - return_object->common.reference_count--; - } } ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 352265498e90..06037e044694 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -403,16 +403,12 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info, return (status); } - /* Take care with reference counts */ - if (original_element != *element_ptr) { - /* Element was replaced */ + /* Update reference count of new object */ (*element_ptr)->common.reference_count = original_ref_count; - - acpi_ut_remove_reference(original_element); } element_ptr++; diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index 661676714f7b..2fe87d0dd9d5 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -252,7 +252,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) internal_name[1] = AML_DUAL_NAME_PREFIX; result = &internal_name[2]; } else { - internal_name[1] = AML_MULTI_NAME_PREFIX_OP; + internal_name[1] = AML_MULTI_NAME_PREFIX; internal_name[2] = (char)num_segments; result = &internal_name[3]; } @@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) internal_name[i] = AML_DUAL_NAME_PREFIX; result = &internal_name[(acpi_size)i + 1]; } else { - internal_name[i] = AML_MULTI_NAME_PREFIX_OP; + internal_name[i] = AML_MULTI_NAME_PREFIX; internal_name[(acpi_size)i + 1] = (char)num_segments; result = &internal_name[(acpi_size)i + 2]; } @@ -450,7 +450,7 @@ acpi_ns_externalize_name(u32 internal_name_length, */ if (prefix_length < internal_name_length) { switch 
(internal_name[prefix_length]) { - case AML_MULTI_NAME_PREFIX_OP: + case AML_MULTI_NAME_PREFIX: /* <count> 4-byte names */ @@ -594,25 +594,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle) void acpi_ns_terminate(void) { acpi_status status; + union acpi_operand_object *prev; + union acpi_operand_object *next; ACPI_FUNCTION_TRACE(ns_terminate); -#ifdef ACPI_EXEC_APP - { - union acpi_operand_object *prev; - union acpi_operand_object *next; + /* Delete any module-level code blocks */ - /* Delete any module-level code blocks */ - - next = acpi_gbl_module_code_list; - while (next) { - prev = next; - next = next->method.mutex; - prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ - acpi_ut_remove_reference(prev); - } + next = acpi_gbl_module_code_list; + while (next) { + prev = next; + next = next->method.mutex; + prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ + acpi_ut_remove_reference(prev); } -#endif /* * Free the entire namespace -- all nodes and all objects diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 05b62ad44c3e..eb9dfaca555f 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -47,6 +47,7 @@ #include "amlcode.h" #include "acnamesp.h" #include "acdispat.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psargs") @@ -186,7 +187,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) end += 1 + (2 * ACPI_NAME_SIZE); break; - case AML_MULTI_NAME_PREFIX_OP: + case AML_MULTI_NAME_PREFIX: /* Multiple name segments, 4 chars each, count in next byte */ @@ -339,7 +340,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, /* 2) not_found during a cond_ref_of(x) is ok by definition */ else if (walk_state->op->common.aml_opcode == - AML_COND_REF_OF_OP) { + AML_CONDITIONAL_REF_OF_OP) { status = AE_OK; } @@ -352,7 +353,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, ((arg->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (arg->common.parent->common.aml_opcode == - AML_VAR_PACKAGE_OP))) { + AML_VARIABLE_PACKAGE_OP))) { status = AE_OK; } } @@ -502,6 +503,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state ACPI_FUNCTION_TRACE(ps_get_next_field); + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); aml = parser_state->aml; /* Determine field type */ @@ -546,6 +548,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Decode the field type */ + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); switch (opcode) { case AML_INT_NAMEDFIELD_OP: @@ -555,6 +558,22 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state acpi_ps_set_name(field, name); parser_state->aml += ACPI_NAME_SIZE; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); + +#ifdef ACPI_ASL_COMPILER + /* + * Because the package length isn't represented as a parse tree object, + * take comments surrounding this and add to the previously created + * parse node. 
+ */ + if (field->common.inline_comment) { + field->common.name_comment = + field->common.inline_comment; + } + field->common.inline_comment = acpi_gbl_current_inline_comment; + acpi_gbl_current_inline_comment = NULL; +#endif + /* Get the length which is encoded as a package length */ field->common.value.size = @@ -609,11 +628,13 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { parser_state->aml++; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); pkg_end = parser_state->aml; pkg_length = acpi_ps_get_next_package_length(parser_state); pkg_end += pkg_length; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); if (parser_state->aml < pkg_end) { /* Non-empty list */ @@ -630,6 +651,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state opcode = ACPI_GET8(parser_state->aml); parser_state->aml++; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); switch (opcode) { case AML_BYTE_OP: /* AML_BYTEDATA_ARG */ @@ -660,6 +682,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Fill in bytelist data */ + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); arg->named.value.size = buffer_length; arg->named.data = parser_state->aml; } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 14d689606d2f..b4224005783c 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -55,6 +55,7 @@ #include "acparser.h" #include "acdispat.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psloop") @@ -132,6 +133,21 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, !walk_state->arg_count) { walk_state->aml = walk_state->parser_state.aml; + switch (op->common.aml_opcode) { + case AML_METHOD_OP: + case AML_BUFFER_OP: + case AML_PACKAGE_OP: + case AML_VARIABLE_PACKAGE_OP: + case AML_WHILE_OP: + + break; + + default: + + ASL_CV_CAPTURE_COMMENTS(walk_state); + break; + } + status = acpi_ps_get_next_arg(walk_state, &(walk_state->parser_state), @@ -254,7 +270,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, case AML_BUFFER_OP: case AML_PACKAGE_OP: - case AML_VAR_PACKAGE_OP: + case AML_VARIABLE_PACKAGE_OP: if ((op->common.parent) && (op->common.parent->common.aml_opcode == @@ -480,6 +496,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) /* Iterative parsing loop, while there is more AML to process: */ while ((parser_state->aml < parser_state->aml_end) || (op)) { + ASL_CV_CAPTURE_COMMENTS(walk_state); + aml_op_start = parser_state->aml; if (!op) { status = @@ -516,6 +534,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) */ walk_state->arg_count = 0; + switch (op->common.aml_opcode) { + case AML_BYTE_OP: + case AML_WORD_OP: + case AML_DWORD_OP: + case AML_QWORD_OP: + + break; + + default: + + ASL_CV_CAPTURE_COMMENTS(walk_state); + break; + } + /* Are there any arguments that must be processed? 
*/ if (walk_state->arg_types) { diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 5c4aff0f4f26..5bcb61831706 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psobject") @@ -190,6 +191,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, */ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) && (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) { + ASL_CV_CAPTURE_COMMENTS(walk_state); status = acpi_ps_get_next_arg(walk_state, &(walk_state->parser_state), @@ -203,6 +205,18 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, INCREMENT_ARG_LIST(walk_state->arg_types); } + /* are there any inline comments associated with the name_seg?? If so, save this. */ + + ASL_CV_CAPTURE_COMMENTS(walk_state); + +#ifdef ACPI_ASL_COMPILER + if (acpi_gbl_current_inline_comment != NULL) { + unnamed_op->common.name_comment = + acpi_gbl_current_inline_comment; + acpi_gbl_current_inline_comment = NULL; + } +#endif + /* * Make sure that we found a NAME and didn't run out of arguments */ @@ -243,6 +257,30 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, acpi_ps_append_arg(*op, unnamed_op->common.value.arg); +#ifdef ACPI_ASL_COMPILER + + /* save any comments that might be associated with unnamed_op. */ + + (*op)->common.inline_comment = unnamed_op->common.inline_comment; + (*op)->common.end_node_comment = unnamed_op->common.end_node_comment; + (*op)->common.close_brace_comment = + unnamed_op->common.close_brace_comment; + (*op)->common.name_comment = unnamed_op->common.name_comment; + (*op)->common.comment_list = unnamed_op->common.comment_list; + (*op)->common.end_blk_comment = unnamed_op->common.end_blk_comment; + (*op)->common.cv_filename = unnamed_op->common.cv_filename; + (*op)->common.cv_parent_filename = + unnamed_op->common.cv_parent_filename; + (*op)->named.aml = unnamed_op->common.aml; + + unnamed_op->common.inline_comment = NULL; + unnamed_op->common.end_node_comment = NULL; + unnamed_op->common.close_brace_comment = NULL; + unnamed_op->common.name_comment = NULL; + unnamed_op->common.comment_list = NULL; + unnamed_op->common.end_blk_comment = NULL; +#endif + if ((*op)->common.aml_opcode == AML_REGION_OP || (*op)->common.aml_opcode == AML_DATA_REGION_OP) { /* diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 451b672915f1..c343a0d5a3d2 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -69,7 +69,7 @@ ACPI_MODULE_NAME("psopcode") AML_DEVICE_OP AML_THERMAL_ZONE_OP AML_METHOD_OP - AML_POWER_RES_OP + AML_POWER_RESOURCE_OP AML_PROCESSOR_OP AML_FIELD_OP AML_INDEX_FIELD_OP @@ -95,7 +95,7 @@ ACPI_MODULE_NAME("psopcode") AML_DEVICE_OP AML_THERMAL_ZONE_OP AML_METHOD_OP - AML_POWER_RES_OP + AML_POWER_RESOURCE_OP AML_PROCESSOR_OP AML_FIELD_OP AML_INDEX_FIELD_OP @@ -113,7 +113,7 @@ ACPI_MODULE_NAME("psopcode") AML_DEVICE_OP AML_THERMAL_ZONE_OP AML_METHOD_OP - AML_POWER_RES_OP + AML_POWER_RESOURCE_OP AML_PROCESSOR_OP AML_NAME_OP AML_ALIAS_OP @@ -136,7 +136,7 @@ ACPI_MODULE_NAME("psopcode") AML_DEVICE_OP AML_THERMAL_ZONE_OP AML_METHOD_OP - AML_POWER_RES_OP + AML_POWER_RESOURCE_OP AML_PROCESSOR_OP AML_NAME_OP AML_ALIAS_OP @@ -149,7 +149,7 @@ ACPI_MODULE_NAME("psopcode") must be deferred until needed AML_METHOD_OP - AML_VAR_PACKAGE_OP + AML_VARIABLE_PACKAGE_OP AML_CREATE_FIELD_OP 
AML_CREATE_BIT_FIELD_OP AML_CREATE_BYTE_FIELD_OP @@ -652,7 +652,10 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */ - AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R) + AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R), +/* 82 */ ACPI_OP("Comment", ARGP_COMMENT_OP, ARGI_COMMENT_OP, + ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT) /*! [End] no source code translation !*/ }; diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 89f95b7f26e9..eff22950232b 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -226,7 +226,7 @@ const u8 acpi_gbl_short_op_index[256] = { /* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74, /* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A, /* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61, -/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, +/* 0xA8 */ 0x62, 0x82, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index a813bbbd5a8b..8116a670de39 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -105,7 +105,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state) aml = parser_state->aml; opcode = (u16) ACPI_GET8(aml); - if (opcode == AML_EXTENDED_OP_PREFIX) { + if (opcode == AML_EXTENDED_PREFIX) { /* Extended opcode, get the second opcode byte */ @@ -210,7 +210,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state *walk_state, || (op->common.parent->common.aml_opcode == AML_BANK_FIELD_OP) || (op->common.parent->common.aml_opcode == - AML_VAR_PACKAGE_OP)) { + AML_VARIABLE_PACKAGE_OP)) { replacement_op = acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP, op->common.aml); @@ -225,7 +225,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state *walk_state, if ((op->common.aml_opcode == AML_BUFFER_OP) || (op->common.aml_opcode == AML_PACKAGE_OP) || (op->common.aml_opcode == - AML_VAR_PACKAGE_OP)) { + AML_VARIABLE_PACKAGE_OP)) { replacement_op = acpi_ps_alloc_op(op->common. 
aml_opcode, diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 9677fff8fd47..c06d6e2fc7a5 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pstree") @@ -216,6 +217,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, next = acpi_ps_get_arg(op, 0); if (next) { + ASL_CV_LABEL_FILENODE(next); return (next); } @@ -223,6 +225,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, next = op->common.next; if (next) { + ASL_CV_LABEL_FILENODE(next); return (next); } @@ -233,6 +236,8 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, while (parent) { arg = acpi_ps_get_arg(parent, 0); while (arg && (arg != origin) && (arg != op)) { + + ASL_CV_LABEL_FILENODE(arg); arg = arg->common.next; } @@ -247,6 +252,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, /* Found sibling of parent */ + ASL_CV_LABEL_FILENODE(parent->common.next); return (parent->common.next); } @@ -254,6 +260,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, parent = parent->common.parent; } + ASL_CV_LABEL_FILENODE(next); return (next); } @@ -296,7 +303,7 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op) child = acpi_ps_get_arg(op, 1); break; - case AML_POWER_RES_OP: + case AML_POWER_RESOURCE_OP: case AML_INDEX_FIELD_OP: child = acpi_ps_get_arg(op, 2); diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 2fa38bb76a55..02642760cb93 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psutils") @@ -152,6 +153,15 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml) acpi_ps_init_op(op, opcode); op->common.aml = aml; op->common.flags = flags; + ASL_CV_CLEAR_OP_COMMENTS(op); + + if (opcode == AML_SCOPE_OP) { + acpi_gbl_current_scope = op; + } + } + + if (gbl_capture_comments) { + ASL_CV_TRANSFER_COMMENTS(op); } return (op); @@ -174,6 +184,7 @@ void acpi_ps_free_op(union acpi_parse_object *op) { ACPI_FUNCTION_NAME(ps_free_op); + ASL_CV_CLEAR_OP_COMMENTS(op); if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n", op)); diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index a3401bd29413..5594a359dbf1 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -142,6 +142,45 @@ acpi_status acpi_ut_create_caches(void) if (ACPI_FAILURE(status)) { return (status); } +#ifdef ACPI_ASL_COMPILER + /* + * For use with the ASL-/ASL+ option. This cache keeps track of regular + * 0xA9 0x01 comments. + */ + status = + acpi_os_create_cache("Acpi-Comment", + sizeof(struct acpi_comment_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_reg_comment_cache); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* + * This cache keeps track of the starting addresses of where the comments + * lie. This helps prevent duplication of comments. 
+ */ + status = + acpi_os_create_cache("Acpi-Comment-Addr", + sizeof(struct acpi_comment_addr_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_comment_addr_cache); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* + * This cache will be used for nodes that represent files. + */ + status = + acpi_os_create_cache("Acpi-File", sizeof(struct acpi_file_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_file_cache); + if (ACPI_FAILURE(status)) { + return (status); + } +#endif + #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Memory allocation lists */ @@ -201,6 +240,17 @@ acpi_status acpi_ut_delete_caches(void) (void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache); acpi_gbl_ps_node_ext_cache = NULL; +#ifdef ACPI_ASL_COMPILER + (void)acpi_os_delete_cache(acpi_gbl_reg_comment_cache); + acpi_gbl_reg_comment_cache = NULL; + + (void)acpi_os_delete_cache(acpi_gbl_comment_addr_cache); + acpi_gbl_comment_addr_cache = NULL; + + (void)acpi_os_delete_cache(acpi_gbl_file_cache); + acpi_gbl_file_cache = NULL; +#endif + #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Debug only - display leftover memory allocation, if any */ diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 11c7f72f2d56..531493306dee 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -71,7 +71,7 @@ acpi_os_create_cache(char *cache_name, ACPI_FUNCTION_ENTRY(); - if (!cache_name || !return_cache || (object_size < 16)) { + if (!cache_name || !return_cache || !object_size) { return (AE_BAD_PARAMETER); } diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index bd5ea3101eb7..615a885e2ca3 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -627,4 +627,5 @@ acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname) } ACPI_EXPORT_SYMBOL(acpi_trace_point) + #endif diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index ff096d9755b9..e0587c85bafd 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -474,6 +474,15 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } + /* + * The end_tag opcode must be followed by a zero byte. + * Although this byte is technically defined to be a checksum, + * in practice, all ASL compilers set this byte to zero. 
+ */ + if (*(aml + 1) != 0) { + return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); + } + /* Return the pointer to the end_tag if requested */ if (!user_function) { diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index a16bd9eac653..950a1e500bfa 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -91,7 +91,7 @@ ACPI_EXPORT_SYMBOL(acpi_error) * * PARAMETERS: module_name - Caller's module name (for error output) * line_number - Caller's line number (for error output) - * status - Status to be formatted + * status - Status value to be decoded/formatted * format - Printf format string + additional args * * RETURN: None @@ -132,8 +132,8 @@ ACPI_EXPORT_SYMBOL(acpi_exception) * * FUNCTION: acpi_warning * - * PARAMETERS: module_name - Caller's module name (for error output) - * line_number - Caller's line number (for error output) + * PARAMETERS: module_name - Caller's module name (for warning output) + * line_number - Caller's line number (for warning output) * format - Printf format string + additional args * * RETURN: None @@ -163,17 +163,13 @@ ACPI_EXPORT_SYMBOL(acpi_warning) * * FUNCTION: acpi_info * - * PARAMETERS: module_name - Caller's module name (for error output) - * line_number - Caller's line number (for error output) - * format - Printf format string + additional args + * PARAMETERS: format - Printf format string + additional args * * RETURN: None * * DESCRIPTION: Print generic "ACPI:" information message. There is no * module/line/version info in order to keep the message simple. * - * TBD: module_name and line_number args are not needed, should be removed. - * ******************************************************************************/ void ACPI_INTERNAL_VAR_XFACE acpi_info(const char *format, ...) { @@ -229,8 +225,8 @@ ACPI_EXPORT_SYMBOL(acpi_bios_error) * * FUNCTION: acpi_bios_warning * - * PARAMETERS: module_name - Caller's module name (for error output) - * line_number - Caller's line number (for error output) + * PARAMETERS: module_name - Caller's module name (for warning output) + * line_number - Caller's line number (for warning output) * format - Printf format string + additional args * * RETURN: None diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 22e08d272db7..c5fecf97ee2f 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -618,6 +618,46 @@ static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, return ret; } +static inline bool iort_iommu_driver_enabled(u8 type) +{ + switch (type) { + case ACPI_IORT_NODE_SMMU_V3: + return IS_BUILTIN(CONFIG_ARM_SMMU_V3); + case ACPI_IORT_NODE_SMMU: + return IS_BUILTIN(CONFIG_ARM_SMMU); + default: + pr_warn("IORT node type %u does not describe an SMMU\n", type); + return false; + } +} + +#ifdef CONFIG_IOMMU_API +static inline +const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +{ + return (fwspec && fwspec->ops) ? 
fwspec->ops : NULL; +} + +static inline +int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +{ + int err = 0; + + if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus && + !dev->iommu_group) + err = ops->add_device(dev); + + return err; +} +#else +static inline +const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +{ return NULL; } +static inline +int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +{ return 0; } +#endif + static const struct iommu_ops *iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, u32 streamid) @@ -626,14 +666,31 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, int ret = -ENODEV; struct fwnode_handle *iort_fwnode; + /* + * If we already translated the fwspec there + * is nothing left to do, return the iommu_ops. + */ + ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); + if (ops) + return ops; + if (node) { iort_fwnode = iort_get_fwnode(node); if (!iort_fwnode) return NULL; ops = iommu_ops_from_fwnode(iort_fwnode); + /* + * If the ops look-up fails, this means that either + * the SMMU drivers have not been probed yet or that + * the SMMU drivers are not built in the kernel; + * Depending on whether the SMMU drivers are built-in + * in the kernel or not, defer the IOMMU configuration + * or just abort it. + */ if (!ops) - return NULL; + return iort_iommu_driver_enabled(node->type) ? + ERR_PTR(-EPROBE_DEFER) : NULL; ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); } @@ -676,6 +733,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) struct acpi_iort_node *node, *parent; const struct iommu_ops *ops = NULL; u32 streamid = 0; + int err; if (dev_is_pci(dev)) { struct pci_bus *bus = to_pci_dev(dev)->bus; @@ -707,6 +765,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) while (parent) { ops = iort_iommu_xlate(dev, parent, streamid); + if (IS_ERR_OR_NULL(ops)) + return ops; parent = iort_node_map_platform_id(node, &streamid, IORT_IOMMU_TYPE, @@ -714,6 +774,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) } } + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. 
+ */ + err = iort_add_device_replay(ops, dev); + if (err) + ops = ERR_PTR(err); + return ops; } @@ -1052,6 +1120,4 @@ void __init acpi_iort_init(void) } iort_init_platform_devices(); - - acpi_probe_device_table(iort); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index d42eeef9d928..a9a9ab3399d4 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && (battery->capacity_now <= battery->alarm))) - pm_wakeup_event(&battery->device->dev, 0); + pm_wakeup_hard_event(&battery->device->dev); return result; } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 34fbe027e73a..784bda663d16 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -114,6 +114,11 @@ int acpi_bus_get_status(struct acpi_device *device) acpi_status status; unsigned long long sta; + if (acpi_device_always_present(device)) { + acpi_set_device_status(device, ACPI_STA_DEFAULT); + return 0; + } + status = acpi_bus_get_status_handle(device->handle, &sta); if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 668137e4a069..b7c2a06963d6 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) } if (state) - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) @@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } else { int keycode; - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); if (button->suspended) break; @@ -530,6 +530,7 @@ static int acpi_button_add(struct acpi_device *device) lid_device = device; } + device_init_wakeup(&device->dev, true); printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); return 0; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 993fd31394c8..798d5003a039 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -24,6 +24,7 @@ #include <linux/pm_qos.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include "internal.h" @@ -399,7 +400,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_lock(&acpi_pm_notifier_lock); if (adev->wakeup.flags.notifier_present) { - __pm_wakeup_event(adev->wakeup.ws, 0); + pm_wakeup_ws_event(adev->wakeup.ws, 0, true); if (adev->wakeup.context.work.func) queue_pm_work(&adev->wakeup.context.work); } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 3e7020751d34..3be1433853bf 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -179,7 +179,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - enum dev_dma_attr attr; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -236,10 +235,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); - attr = acpi_get_dma_attr(acpi_dev); - if (attr != DEV_DMA_NOT_SUPPORTED) - acpi_dma_configure(dev, attr); - acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); diff --git 
a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 55f51115f016..1a76c784cd4c 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -27,97 +27,97 @@ static struct pmic_table power_table[] = { .address = 0x00, .reg = 0x13, .bit = 0x05, - }, + }, /* ALD1 */ { .address = 0x04, .reg = 0x13, .bit = 0x06, - }, + }, /* ALD2 */ { .address = 0x08, .reg = 0x13, .bit = 0x07, - }, + }, /* ALD3 */ { .address = 0x0c, .reg = 0x12, .bit = 0x03, - }, + }, /* DLD1 */ { .address = 0x10, .reg = 0x12, .bit = 0x04, - }, + }, /* DLD2 */ { .address = 0x14, .reg = 0x12, .bit = 0x05, - }, + }, /* DLD3 */ { .address = 0x18, .reg = 0x12, .bit = 0x06, - }, + }, /* DLD4 */ { .address = 0x1c, .reg = 0x12, .bit = 0x00, - }, + }, /* ELD1 */ { .address = 0x20, .reg = 0x12, .bit = 0x01, - }, + }, /* ELD2 */ { .address = 0x24, .reg = 0x12, .bit = 0x02, - }, + }, /* ELD3 */ { .address = 0x28, .reg = 0x13, .bit = 0x02, - }, + }, /* FLD1 */ { .address = 0x2c, .reg = 0x13, .bit = 0x03, - }, + }, /* FLD2 */ { .address = 0x30, .reg = 0x13, .bit = 0x04, - }, + }, /* FLD3 */ { - .address = 0x38, + .address = 0x34, .reg = 0x10, .bit = 0x03, - }, + }, /* BUC1 */ { - .address = 0x3c, + .address = 0x38, .reg = 0x10, .bit = 0x06, - }, + }, /* BUC2 */ { - .address = 0x40, + .address = 0x3c, .reg = 0x10, .bit = 0x05, - }, + }, /* BUC3 */ { - .address = 0x44, + .address = 0x40, .reg = 0x10, .bit = 0x04, - }, + }, /* BUC4 */ { - .address = 0x48, + .address = 0x44, .reg = 0x10, .bit = 0x01, - }, + }, /* BUC5 */ { - .address = 0x4c, + .address = 0x48, .reg = 0x10, .bit = 0x00 - }, + }, /* BUC6 */ }; /* TMP0 - TMP5 are the same, all from GPADC */ diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 1c2b846c5776..3a6c9b741b23 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -864,6 +864,16 @@ void acpi_resume_power_resources(void) mutex_unlock(&resource->resource_lock); } + + mutex_unlock(&power_resource_list_lock); +} + +void acpi_turn_off_unused_power_resources(void) +{ + struct acpi_power_resource *resource; + + mutex_lock(&power_resource_list_lock); + list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { int result, state; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index c26931067415..e39ec7b7cb67 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1363,20 +1363,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) * @dev: The pointer to the device * @attr: device dma attributes */ -void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { const struct iommu_ops *iommu; + u64 size; iort_set_dma_mask(dev); iommu = iort_iommu_configure(dev); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); /* * Assume dma valid range starts at 0 and covers the whole * coherent_dma_mask. 
*/ - arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu, - attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT); + + return 0; } EXPORT_SYMBOL_GPL(acpi_dma_configure); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index a4327af676fe..a6574d626340 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -474,6 +474,7 @@ static void acpi_pm_start(u32 acpi_state) */ static void acpi_pm_end(void) { + acpi_turn_off_unused_power_resources(); acpi_scan_lock_release(); /* * This is necessary in case acpi_pm_finish() is not called during a @@ -662,14 +663,40 @@ static int acpi_freeze_prepare(void) acpi_os_wait_events_complete(); if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + return 0; } +static void acpi_freeze_wake(void) +{ + /* + * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means + * that the SCI has triggered while suspended, so cancel the wakeup in + * case it has not been a wakeup event (the GPEs will be checked later). + */ + if (acpi_sci_irq_valid() && + !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) + pm_system_cancel_wakeup(); +} + +static void acpi_freeze_sync(void) +{ + /* + * Process all pending events in case there are any wakeup ones. + * + * The EC driver uses the system workqueue, so that one needs to be + * flushed too. + */ + acpi_os_wait_events_complete(); + flush_scheduled_work(); +} + static void acpi_freeze_restore(void) { acpi_disable_wakeup_devices(ACPI_STATE_S0); if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); + acpi_enable_all_runtime_gpes(); } @@ -681,6 +708,8 @@ static void acpi_freeze_end(void) static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, .prepare = acpi_freeze_prepare, + .wake = acpi_freeze_wake, + .sync = acpi_freeze_sync, .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index a9cc34e663f9..a82ff74faf7a 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -6,6 +6,7 @@ extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; extern void acpi_resume_power_resources(void); +extern void acpi_turn_off_unused_power_resources(void); static inline acpi_status acpi_set_waking_vector(u32 wakeup_address) { diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c new file mode 100644 index 000000000000..bd86b809c848 --- /dev/null +++ b/drivers/acpi/x86/utils.c @@ -0,0 +1,90 @@ +/* + * X86 ACPI Utility Functions + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + * + * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: + * Copyright (C) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> +#include "../internal.h" + +/* + * Some ACPI devices are hidden (status == 0x0) in recent BIOS-es because + * some recent Windows drivers bind to one device but poke at multiple + * devices at the same time, so the others get hidden. + * We work around this by always reporting ACPI_STA_DEFAULT for these + * devices. Note this MUST only be done for devices where this is safe. 
+ * + * This forcing of devices to be present is limited to specific CPU (SoC) + * models both to avoid potentially causing trouble on other models and + * because some HIDs are re-used on different SoCs for completely + * different devices. + */ +struct always_present_id { + struct acpi_device_id hid[2]; + struct x86_cpu_id cpu_ids[2]; + const char *uid; +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +#define ENTRY(hid, uid, cpu_models) { \ + { { hid, }, {} }, \ + { cpu_models, {} }, \ + uid, \ +} + +static const struct always_present_id always_present_ids[] = { + /* + * Bay / Cherry Trail PWM directly poked by GPU driver in win10, + * but Linux uses a separate PWM driver, harmless if not used. + */ + ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)), + ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)), + /* + * The INT0002 device is necessary to clear wakeup interrupt sources + * on Cherry Trail devices, without it we get nobody cared IRQ msgs. + */ + ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)), +}; + +bool acpi_device_always_present(struct acpi_device *adev) +{ + u32 *status = (u32 *)&adev->status; + u32 old_status = *status; + bool ret = false; + unsigned int i; + + /* acpi_match_device_ids checks status, so set it to default */ + *status = ACPI_STA_DEFAULT; + for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { + if (acpi_match_device_ids(adev, always_present_ids[i].hid)) + continue; + + if (!adev->pnp.unique_id || + strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) + continue; + + if (!x86_match_cpu(always_present_ids[i].cpu_ids)) + continue; + + if (old_status != ACPI_STA_DEFAULT) /* Log only once */ + dev_info(&adev->dev, + "Device [%s] is in always present list\n", + adev->pnp.bus_id); + + ret = true; + break; + } + *status = old_status; + + return ret; +} diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a1fbf55c4d3a..4882f06d12df 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -19,6 +19,7 @@ #include <linux/device.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -356,6 +357,10 @@ re_probe: if (ret) goto pinctrl_bind_failed; + ret = dma_configure(dev); + if (ret) + goto dma_failed; + if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", __func__, dev_name(dev)); @@ -417,6 +422,8 @@ re_probe: goto done; probe_failed: + dma_deconfigure(dev); +dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); @@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); + dma_deconfigure(dev); + devres_release_all(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 51b7061ff7c0..f3deb6af42ad 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -7,9 +7,11 @@ * This file is released under the GPLv2. 
*/ +#include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/gfp.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -340,3 +342,42 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif + +/* + * Common configuration to enable DMA API use for a device + */ +#include <linux/pci.h> + +int dma_configure(struct device *dev) +{ + struct device *bridge = NULL, *dma_dev = dev; + enum dev_dma_attr attr; + int ret = 0; + + if (dev_is_pci(dev)) { + bridge = pci_get_host_bridge_device(to_pci_dev(dev)); + dma_dev = bridge; + if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && + dma_dev->parent->of_node) + dma_dev = dma_dev->parent; + } + + if (dma_dev->of_node) { + ret = of_dma_configure(dev, dma_dev->of_node); + } else if (has_acpi_companion(dma_dev)) { + attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); + if (attr != DEV_DMA_NOT_SUPPORTED) + ret = acpi_dma_configure(dev, attr); + } + + if (bridge) + pci_put_host_bridge_device(bridge); + + return ret; +} + +void dma_deconfigure(struct device *dev) +{ + of_dma_deconfigure(dev); + acpi_dma_deconfigure(dev); +} diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9faee1c893e5..e987a6f55d36 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1091,11 +1091,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; - if (pm_wakeup_pending()) { - async_error = -EBUSY; - goto Complete; - } - if (dev->power.syscore || dev->power.direct_complete) goto Complete; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 136854970489..f62082fdd670 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ unsigned int pm_wakeup_irq __read_mostly; -/* If set and the system is suspending, terminate the suspend. */ -static bool pm_abort_suspend __read_mostly; +/* If greater than 0 and the system is suspending, terminate the suspend. */ +static atomic_t pm_abort_suspend __read_mostly; /* * Combined counters of registered wakeup events and wakeup events in progress. @@ -512,12 +512,13 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws) /** * wakup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of of wakeup events being * processed. */ -static void wakeup_source_activate(struct wakeup_source *ws) +static void wakeup_source_activate(struct wakeup_source *ws, bool hard) { unsigned int cec; @@ -525,11 +526,8 @@ static void wakeup_source_activate(struct wakeup_source *ws) "unregistered wakeup source\n")) return; - /* - * active wakeup source should bring the system - * out of PM_SUSPEND_FREEZE state - */ - freeze_wake(); + if (hard) + pm_system_wakeup(); ws->active = true; ws->active_count++; @@ -546,8 +544,9 @@ static void wakeup_source_activate(struct wakeup_source *ws) /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. 
*/ -static void wakeup_source_report_event(struct wakeup_source *ws) +static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ @@ -555,7 +554,7 @@ static void wakeup_source_report_event(struct wakeup_source *ws) ws->wakeup_count++; if (!ws->active) - wakeup_source_activate(ws); + wakeup_source_activate(ws, hard); } /** @@ -573,7 +572,7 @@ void __pm_stay_awake(struct wakeup_source *ws) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, false); del_timer(&ws->timer); ws->timer_expires = 0; @@ -739,9 +738,10 @@ static void pm_wakeup_timer_fn(unsigned long data) } /** - * __pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_ws_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. If @ws is @@ -750,7 +750,7 @@ static void pm_wakeup_timer_fn(unsigned long data) * * It is safe to call this function from interrupt context. */ -void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) { unsigned long flags; unsigned long expires; @@ -760,7 +760,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, hard); if (!msec) { wakeup_source_deactivate(ws); @@ -779,17 +779,17 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) unlock: spin_unlock_irqrestore(&ws->lock, flags); } -EXPORT_SYMBOL_GPL(__pm_wakeup_event); - +EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * - * Call __pm_wakeup_event() for the @dev's wakeup source object. + * Call pm_wakeup_ws_event() for the @dev's wakeup source object. 
*/ -void pm_wakeup_event(struct device *dev, unsigned int msec) +void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) { unsigned long flags; @@ -797,10 +797,10 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) return; spin_lock_irqsave(&dev->power.lock, flags); - __pm_wakeup_event(dev->power.wakeup, msec); + pm_wakeup_ws_event(dev->power.wakeup, msec, hard); spin_unlock_irqrestore(&dev->power.lock, flags); } -EXPORT_SYMBOL_GPL(pm_wakeup_event); +EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { @@ -856,20 +856,26 @@ bool pm_wakeup_pending(void) pm_print_active_wakeup_sources(); } - return ret || pm_abort_suspend; + return ret || atomic_read(&pm_abort_suspend) > 0; } void pm_system_wakeup(void) { - pm_abort_suspend = true; + atomic_inc(&pm_abort_suspend); freeze_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); -void pm_wakeup_clear(void) +void pm_system_cancel_wakeup(void) +{ + atomic_dec(&pm_abort_suspend); +} + +void pm_wakeup_clear(bool reset) { - pm_abort_suspend = false; pm_wakeup_irq = 0; + if (reset) + atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 26812c1ed0cf..454bf9c34882 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -387,6 +387,7 @@ struct rbd_device { struct rw_semaphore lock_rwsem; enum rbd_lock_state lock_state; + char lock_cookie[32]; struct rbd_client_id owner_cid; struct work_struct acquired_lock_work; struct work_struct released_lock_work; @@ -477,13 +478,6 @@ static int minor_to_rbd_dev_id(int minor) return minor >> RBD_SINGLE_MAJOR_PART_SHIFT; } -static bool rbd_is_lock_supported(struct rbd_device *rbd_dev) -{ - return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) && - rbd_dev->spec->snap_id == CEPH_NOSNAP && - !rbd_dev->mapping.read_only; -} - static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) { return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || @@ -731,7 +725,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) kref_init(&rbdc->kref); INIT_LIST_HEAD(&rbdc->node); - rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0); + rbdc->client = ceph_create_client(ceph_opts, rbdc); if (IS_ERR(rbdc->client)) goto out_rbdc; ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */ @@ -804,6 +798,7 @@ enum { Opt_read_only, Opt_read_write, Opt_lock_on_read, + Opt_exclusive, Opt_err }; @@ -816,6 +811,7 @@ static match_table_t rbd_opts_tokens = { {Opt_read_write, "read_write"}, {Opt_read_write, "rw"}, /* Alternate spelling */ {Opt_lock_on_read, "lock_on_read"}, + {Opt_exclusive, "exclusive"}, {Opt_err, NULL} }; @@ -823,11 +819,13 @@ struct rbd_options { int queue_depth; bool read_only; bool lock_on_read; + bool exclusive; }; #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ #define RBD_READ_ONLY_DEFAULT false #define RBD_LOCK_ON_READ_DEFAULT false +#define RBD_EXCLUSIVE_DEFAULT false static int parse_rbd_opts_token(char *c, void *private) { @@ -866,6 +864,9 @@ static int parse_rbd_opts_token(char *c, void *private) case Opt_lock_on_read: rbd_opts->lock_on_read = true; break; + case Opt_exclusive: + rbd_opts->exclusive = true; + break; default: /* libceph prints "bad option" msg */ return -EINVAL; @@ -3079,7 +3080,8 @@ static int rbd_lock(struct rbd_device *rbd_dev) char cookie[32]; int ret; - WARN_ON(__rbd_is_lock_owner(rbd_dev)); + WARN_ON(__rbd_is_lock_owner(rbd_dev) || + rbd_dev->lock_cookie[0] != '\0'); format_lock_cookie(rbd_dev, cookie); 
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, @@ -3089,6 +3091,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) return ret; rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; + strcpy(rbd_dev->lock_cookie, cookie); rbd_set_owner_cid(rbd_dev, &cid); queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); return 0; @@ -3097,27 +3100,24 @@ static int rbd_lock(struct rbd_device *rbd_dev) /* * lock_rwsem must be held for write */ -static int rbd_unlock(struct rbd_device *rbd_dev) +static void rbd_unlock(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - char cookie[32]; int ret; - WARN_ON(!__rbd_is_lock_owner(rbd_dev)); - - rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; + WARN_ON(!__rbd_is_lock_owner(rbd_dev) || + rbd_dev->lock_cookie[0] == '\0'); - format_lock_cookie(rbd_dev, cookie); ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, - RBD_LOCK_NAME, cookie); - if (ret && ret != -ENOENT) { - rbd_warn(rbd_dev, "cls_unlock failed: %d", ret); - return ret; - } + RBD_LOCK_NAME, rbd_dev->lock_cookie); + if (ret && ret != -ENOENT) + rbd_warn(rbd_dev, "failed to unlock: %d", ret); + /* treat errors as the image is unlocked */ + rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; + rbd_dev->lock_cookie[0] = '\0'; rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work); - return 0; } static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, @@ -3447,6 +3447,18 @@ again: ret = rbd_request_lock(rbd_dev); if (ret == -ETIMEDOUT) { goto again; /* treat this as a dead client */ + } else if (ret == -EROFS) { + rbd_warn(rbd_dev, "peer will not release lock"); + /* + * If this is rbd_add_acquire_lock(), we want to fail + * immediately -- reuse BLACKLISTED flag. Otherwise we + * want to block. + */ + if (!(rbd_dev->disk->flags & GENHD_FL_UP)) { + set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); + /* wake "rbd map --exclusive" process */ + wake_requests(rbd_dev, false); + } } else if (ret < 0) { rbd_warn(rbd_dev, "error requesting lock: %d", ret); mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, @@ -3490,16 +3502,15 @@ static bool rbd_release_lock(struct rbd_device *rbd_dev) if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) return false; - if (!rbd_unlock(rbd_dev)) - /* - * Give others a chance to grab the lock - we would re-acquire - * almost immediately if we got new IO during ceph_osdc_sync() - * otherwise. We need to ack our own notifications, so this - * lock_dwork will be requeued from rbd_wait_state_locked() - * after wake_requests() in rbd_handle_released_lock(). - */ - cancel_delayed_work(&rbd_dev->lock_dwork); - + rbd_unlock(rbd_dev); + /* + * Give others a chance to grab the lock - we would re-acquire + * almost immediately if we got new IO during ceph_osdc_sync() + * otherwise. We need to ack our own notifications, so this + * lock_dwork will be requeued from rbd_wait_state_locked() + * after wake_requests() in rbd_handle_released_lock(). + */ + cancel_delayed_work(&rbd_dev->lock_dwork); return true; } @@ -3580,12 +3591,16 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, up_read(&rbd_dev->lock_rwsem); } -static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, - void **p) +/* + * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no + * ResponseMessage is needed. 
+ */ +static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, + void **p) { struct rbd_client_id my_cid = rbd_get_cid(rbd_dev); struct rbd_client_id cid = { 0 }; - bool need_to_send; + int result = 1; if (struct_v >= 2) { cid.gid = ceph_decode_64(p); @@ -3595,19 +3610,36 @@ static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, cid.handle); if (rbd_cid_equal(&cid, &my_cid)) - return false; + return result; down_read(&rbd_dev->lock_rwsem); - need_to_send = __rbd_is_lock_owner(rbd_dev); - if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { - if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) { - dout("%s rbd_dev %p queueing unlock_work\n", __func__, - rbd_dev); - queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work); + if (__rbd_is_lock_owner(rbd_dev)) { + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED && + rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) + goto out_unlock; + + /* + * encode ResponseMessage(0) so the peer can detect + * a missing owner + */ + result = 0; + + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { + if (!rbd_dev->opts->exclusive) { + dout("%s rbd_dev %p queueing unlock_work\n", + __func__, rbd_dev); + queue_work(rbd_dev->task_wq, + &rbd_dev->unlock_work); + } else { + /* refuse to release the lock */ + result = -EROFS; + } } } + +out_unlock: up_read(&rbd_dev->lock_rwsem); - return need_to_send; + return result; } static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev, @@ -3690,13 +3722,10 @@ static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie, rbd_acknowledge_notify(rbd_dev, notify_id, cookie); break; case RBD_NOTIFY_OP_REQUEST_LOCK: - if (rbd_handle_request_lock(rbd_dev, struct_v, &p)) - /* - * send ResponseMessage(0) back so the client - * can detect a missing owner - */ + ret = rbd_handle_request_lock(rbd_dev, struct_v, &p); + if (ret <= 0) rbd_acknowledge_notify_result(rbd_dev, notify_id, - cookie, 0); + cookie, ret); else rbd_acknowledge_notify(rbd_dev, notify_id, cookie); break; @@ -3821,24 +3850,51 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev) ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); } +/* + * lock_rwsem must be held for write + */ +static void rbd_reacquire_lock(struct rbd_device *rbd_dev) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + char cookie[32]; + int ret; + + WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); + + format_lock_cookie(rbd_dev, cookie); + ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid, + &rbd_dev->header_oloc, RBD_LOCK_NAME, + CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie, + RBD_LOCK_TAG, cookie); + if (ret) { + if (ret != -EOPNOTSUPP) + rbd_warn(rbd_dev, "failed to update lock cookie: %d", + ret); + + /* + * Lock cookie cannot be updated on older OSDs, so do + * a manual release and queue an acquire. 
+ */ + if (rbd_release_lock(rbd_dev)) + queue_delayed_work(rbd_dev->task_wq, + &rbd_dev->lock_dwork, 0); + } else { + strcpy(rbd_dev->lock_cookie, cookie); + } +} + static void rbd_reregister_watch(struct work_struct *work) { struct rbd_device *rbd_dev = container_of(to_delayed_work(work), struct rbd_device, watch_dwork); - bool was_lock_owner = false; - bool need_to_wake = false; int ret; dout("%s rbd_dev %p\n", __func__, rbd_dev); - down_write(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) - was_lock_owner = rbd_release_lock(rbd_dev); - mutex_lock(&rbd_dev->watch_mutex); if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { mutex_unlock(&rbd_dev->watch_mutex); - goto out; + return; } ret = __rbd_register_watch(rbd_dev); @@ -3846,36 +3902,28 @@ static void rbd_reregister_watch(struct work_struct *work) rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); if (ret == -EBLACKLISTED || ret == -ENOENT) { set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); - need_to_wake = true; + wake_requests(rbd_dev, true); } else { queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, RBD_RETRY_DELAY); } mutex_unlock(&rbd_dev->watch_mutex); - goto out; + return; } - need_to_wake = true; rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; mutex_unlock(&rbd_dev->watch_mutex); + down_write(&rbd_dev->lock_rwsem); + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) + rbd_reacquire_lock(rbd_dev); + up_write(&rbd_dev->lock_rwsem); + ret = rbd_dev_refresh(rbd_dev); if (ret) rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret); - - if (was_lock_owner) { - ret = rbd_try_lock(rbd_dev); - if (ret) - rbd_warn(rbd_dev, "reregisteration lock failed: %d", - ret); - } - -out: - up_write(&rbd_dev->lock_rwsem); - if (need_to_wake) - wake_requests(rbd_dev, true); } /* @@ -4034,10 +4082,6 @@ static void rbd_queue_workfn(struct work_struct *work) if (op_type != OBJ_OP_READ) { snapc = rbd_dev->header.snapc; ceph_get_snap_context(snapc); - must_be_locked = rbd_is_lock_supported(rbd_dev); - } else { - must_be_locked = rbd_dev->opts->lock_on_read && - rbd_is_lock_supported(rbd_dev); } up_read(&rbd_dev->header_rwsem); @@ -4048,14 +4092,20 @@ static void rbd_queue_workfn(struct work_struct *work) goto err_rq; } + must_be_locked = + (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) && + (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); if (must_be_locked) { down_read(&rbd_dev->lock_rwsem); if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) + !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + if (rbd_dev->opts->exclusive) { + rbd_warn(rbd_dev, "exclusive lock required"); + result = -EROFS; + goto err_unlock; + } rbd_wait_state_locked(rbd_dev); - - WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^ - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); + } if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { result = -EBLACKLISTED; goto err_unlock; @@ -4114,19 +4164,10 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, static void rbd_free_disk(struct rbd_device *rbd_dev) { - struct gendisk *disk = rbd_dev->disk; - - if (!disk) - return; - + blk_cleanup_queue(rbd_dev->disk->queue); + blk_mq_free_tag_set(&rbd_dev->tag_set); + put_disk(rbd_dev->disk); rbd_dev->disk = NULL; - if (disk->flags & GENHD_FL_UP) { - del_gendisk(disk); - if (disk->queue) - blk_cleanup_queue(disk->queue); - blk_mq_free_tag_set(&rbd_dev->tag_set); - } - put_disk(disk); } 
static int rbd_obj_read_sync(struct rbd_device *rbd_dev, @@ -4383,8 +4424,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + /* + * disk_release() expects a queue ref from add_disk() and will + * put it. Hold an extra ref until add_disk() is called. + */ + WARN_ON(!blk_get_queue(q)); disk->queue = q; - q->queuedata = rbd_dev; rbd_dev->disk = disk; @@ -5624,6 +5669,7 @@ static int rbd_add_parse_args(const char *buf, rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; + rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; copts = ceph_parse_options(options, mon_addrs, mon_addrs + mon_addrs_size - 1, @@ -5682,6 +5728,33 @@ again: return ret; } +static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) +{ + down_write(&rbd_dev->lock_rwsem); + if (__rbd_is_lock_owner(rbd_dev)) + rbd_unlock(rbd_dev); + up_write(&rbd_dev->lock_rwsem); +} + +static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) +{ + if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { + rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); + return -EINVAL; + } + + /* FIXME: "rbd map --exclusive" should be in interruptible */ + down_read(&rbd_dev->lock_rwsem); + rbd_wait_state_locked(rbd_dev); + up_read(&rbd_dev->lock_rwsem); + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + rbd_warn(rbd_dev, "failed to acquire exclusive lock"); + return -EROFS; + } + + return 0; +} + /* * An rbd format 2 image has a unique identifier, distinct from the * name given to it by the user. Internally, that identifier is @@ -5873,6 +5946,15 @@ out_err: return ret; } +static void rbd_dev_device_release(struct rbd_device *rbd_dev) +{ + clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); + rbd_dev_mapping_clear(rbd_dev); + rbd_free_disk(rbd_dev); + if (!single_major) + unregister_blkdev(rbd_dev->major, rbd_dev->name); +} + /* * rbd_dev->header_rwsem must be locked for write and will be unlocked * upon return. @@ -5908,26 +5990,13 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); - dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); - ret = device_add(&rbd_dev->dev); + ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); if (ret) goto err_out_mapping; - /* Everything's ready. Announce the disk to the world. 
*/ - set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); up_write(&rbd_dev->header_rwsem); - - spin_lock(&rbd_dev_list_lock); - list_add_tail(&rbd_dev->node, &rbd_dev_list); - spin_unlock(&rbd_dev_list_lock); - - add_disk(rbd_dev->disk); - pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name, - (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT, - rbd_dev->header.features); - - return ret; + return 0; err_out_mapping: rbd_dev_mapping_clear(rbd_dev); @@ -5962,11 +6031,11 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { rbd_dev_unprobe(rbd_dev); + if (rbd_dev->opts) + rbd_unregister_watch(rbd_dev); rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; - - rbd_dev_destroy(rbd_dev); } /* @@ -6126,22 +6195,43 @@ static ssize_t do_rbd_add(struct bus_type *bus, rbd_dev->mapping.read_only = read_only; rc = rbd_dev_device_setup(rbd_dev); - if (rc) { - /* - * rbd_unregister_watch() can't be moved into - * rbd_dev_image_release() without refactoring, see - * commit 1f3ef78861ac. - */ - rbd_unregister_watch(rbd_dev); - rbd_dev_image_release(rbd_dev); - goto out; + if (rc) + goto err_out_image_probe; + + if (rbd_dev->opts->exclusive) { + rc = rbd_add_acquire_lock(rbd_dev); + if (rc) + goto err_out_device_setup; } + /* Everything's ready. Announce the disk to the world. */ + + rc = device_add(&rbd_dev->dev); + if (rc) + goto err_out_image_lock; + + add_disk(rbd_dev->disk); + /* see rbd_init_disk() */ + blk_put_queue(rbd_dev->disk->queue); + + spin_lock(&rbd_dev_list_lock); + list_add_tail(&rbd_dev->node, &rbd_dev_list); + spin_unlock(&rbd_dev_list_lock); + + pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name, + (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT, + rbd_dev->header.features); rc = count; out: module_put(THIS_MODULE); return rc; +err_out_image_lock: + rbd_dev_image_unlock(rbd_dev); +err_out_device_setup: + rbd_dev_device_release(rbd_dev); +err_out_image_probe: + rbd_dev_image_release(rbd_dev); err_out_rbd_dev: rbd_dev_destroy(rbd_dev); err_out_client: @@ -6169,21 +6259,6 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, return do_rbd_add(bus, buf, count); } -static void rbd_dev_device_release(struct rbd_device *rbd_dev) -{ - rbd_free_disk(rbd_dev); - - spin_lock(&rbd_dev_list_lock); - list_del_init(&rbd_dev->node); - spin_unlock(&rbd_dev_list_lock); - - clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - device_del(&rbd_dev->dev); - rbd_dev_mapping_clear(rbd_dev); - if (!single_major) - unregister_blkdev(rbd_dev->major, rbd_dev->name); -} - static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) { while (rbd_dev->parent) { @@ -6201,6 +6276,7 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) } rbd_assert(second); rbd_dev_image_release(second); + rbd_dev_destroy(second); first->parent = NULL; first->parent_overlap = 0; @@ -6269,21 +6345,16 @@ static ssize_t do_rbd_remove(struct bus_type *bus, blk_set_queue_dying(rbd_dev->disk->queue); } - down_write(&rbd_dev->lock_rwsem); - if (__rbd_is_lock_owner(rbd_dev)) - rbd_unlock(rbd_dev); - up_write(&rbd_dev->lock_rwsem); - rbd_unregister_watch(rbd_dev); + del_gendisk(rbd_dev->disk); + spin_lock(&rbd_dev_list_lock); + list_del_init(&rbd_dev->node); + spin_unlock(&rbd_dev_list_lock); + device_del(&rbd_dev->dev); - /* - * Don't free anything from rbd_dev->disk until after all - * notifies are completely processed. 
Otherwise - * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting - * in a potential use after free of rbd_dev->disk or rbd_dev. - */ + rbd_dev_image_unlock(rbd_dev); rbd_dev_device_release(rbd_dev); rbd_dev_image_release(rbd_dev); - + rbd_dev_destroy(rbd_dev); return count; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 94173de1efaa..553cc4c542b4 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -452,8 +452,7 @@ static int init_vq(struct virtio_blk *vblk) } /* Discover virtqueues and write information to configuration. */ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, - &desc); + err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) goto out; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index e770ad977472..b67263d6e34b 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -94,9 +94,9 @@ static struct applicom_board { static unsigned int irq = 0; /* interrupt number IRQ */ static unsigned long mem = 0; /* physical segment of board */ -module_param(irq, uint, 0); +module_param_hw(irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ of the Applicom board"); -module_param(mem, ulong, 0); +module_param_hw(mem, ulong, iomem, 0); MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board"); static unsigned int numboards; /* number of installed boards */ diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b2b618f066e0..59ee93ea84eb 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -1375,39 +1375,39 @@ MODULE_PARM_DESC(type, "Defines the type of each interface, each" " interface separated by commas. The types are 'kcs'," " 'smic', and 'bt'. For example si_type=kcs,bt will set" " the first interface to kcs and the second to bt"); -module_param_array(addrs, ulong, &num_addrs, 0); +module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0); MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" " addresses separated by commas. Only use if an interface" " is in memory. Otherwise, set it to zero or leave" " it blank."); -module_param_array(ports, uint, &num_ports, 0); +module_param_hw_array(ports, uint, ioport, &num_ports, 0); MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" " addresses separated by commas. Only use if an interface" " is a port. Otherwise, set it to zero or leave" " it blank."); -module_param_array(irqs, int, &num_irqs, 0); +module_param_hw_array(irqs, int, irq, &num_irqs, 0); MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" " addresses separated by commas. Only use if an interface" " has an interrupt. Otherwise, set it to zero or leave" " it blank."); -module_param_array(regspacings, int, &num_regspacings, 0); +module_param_hw_array(regspacings, int, other, &num_regspacings, 0); MODULE_PARM_DESC(regspacings, "The number of bytes between the start address" " and each successive register used by the interface. For" " instance, if the start address is 0xca2 and the spacing" " is 2, then the second address is at 0xca4. Defaults" " to 1."); -module_param_array(regsizes, int, &num_regsizes, 0); +module_param_hw_array(regsizes, int, other, &num_regsizes, 0); MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes." " This should generally be 1, 2, 4, or 8 for an 8-bit," " 16-bit, 32-bit, or 64-bit register. 
Use this if you" " the 8-bit IPMI register has to be read from a larger" " register."); -module_param_array(regshifts, int, &num_regshifts, 0); +module_param_hw_array(regshifts, int, other, &num_regshifts, 0); MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the." " IPMI register, in bits. For instance, if the data" " is read from a 32-bit word and the IPMI data is in" " bit 8-15, then the shift would be 8"); -module_param_array(slave_addrs, int, &num_slave_addrs, 0); +module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0); MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" " the controller. Normally this is 0x20, but can be" " overridden by this parm. This is an array indexed" diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index 3a3ff2eb6cba..b5e3103c1175 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -80,10 +80,10 @@ int mwave_3780i_io = 0; int mwave_uart_irq = 0; int mwave_uart_io = 0; module_param(mwave_debug, int, 0); -module_param(mwave_3780i_irq, int, 0); -module_param(mwave_3780i_io, int, 0); -module_param(mwave_uart_irq, int, 0); -module_param(mwave_uart_io, int, 0); +module_param_hw(mwave_3780i_irq, int, irq, 0); +module_param_hw(mwave_3780i_io, int, ioport, 0); +module_param_hw(mwave_uart_irq, int, irq, 0); +module_param_hw(mwave_uart_io, int, ioport, 0); static int mwave_open(struct inode *inode, struct file *file); static int mwave_close(struct inode *inode, struct file *file); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 7d041d026680..ad843eb02ae7 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1945,9 +1945,9 @@ static int init_vqs(struct ports_device *portdev) } } /* Find the queues. */ - err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, - io_callbacks, - (const char **)io_names, NULL); + err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, + io_callbacks, + (const char **)io_names, NULL); if (err) goto free; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 9356ab4b7d76..36cfea38135f 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -47,6 +47,14 @@ config COMMON_CLK_RK808 clocked at 32KHz each. Clkout1 is always on, Clkout2 can off by control register. +config COMMON_CLK_HI655X + tristate "Clock driver for Hi655x" + depends on MFD_HI655X_PMIC || COMPILE_TEST + ---help--- + This driver supports the hi655x PMIC clock. This + multi-function device has one fixed-rate oscillator, clocked + at 32KHz. 
+ config COMMON_CLK_SCPI tristate "Clock driver controlled via SCPI interface" depends on ARM_SCPI_PROTOCOL || COMPILE_TEST diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 92c12b86c2e8..c19983afcb81 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o +obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 45ad168e1496..7d3223fc7161 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -399,18 +399,18 @@ of_at91_clk_pll_get_characteristics(struct device_node *np) if (!characteristics) return NULL; - output = kzalloc(sizeof(*output) * num_output, GFP_KERNEL); + output = kcalloc(num_output, sizeof(*output), GFP_KERNEL); if (!output) goto out_free_characteristics; if (num_cells > 2) { - out = kzalloc(sizeof(*out) * num_output, GFP_KERNEL); + out = kcalloc(num_output, sizeof(*out), GFP_KERNEL); if (!out) goto out_free_output; } if (num_cells > 3) { - icpll = kzalloc(sizeof(*icpll) * num_output, GFP_KERNEL); + icpll = kcalloc(num_output, sizeof(*icpll), GFP_KERNEL); if (!icpll) goto out_free_output; } diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index e04634c46395..2d61893da024 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -277,7 +277,7 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index, if (rate >= VCO_LOW && rate < VCO_HIGH) { ki = 4; kp_index = KP_BAND_MID; - } else if (rate >= VCO_HIGH && rate && rate < VCO_HIGH_HIGH) { + } else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) { ki = 3; kp_index = KP_BAND_HIGH; } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) { diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c index a564e9248814..adc14145861a 100644 --- a/drivers/clk/bcm/clk-ns2.c +++ b/drivers/clk/bcm/clk-ns2.c @@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr", static const struct iproc_pll_ctrl genpll_sw = { .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL, - .aon = AON_VAL(0x0, 2, 9, 8), + .aon = AON_VAL(0x0, 1, 11, 10), .reset = RESET_VAL(0x4, 2, 1), .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3), .ndiv_int = REG_VAL(0x8, 4, 10), diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index 3fca0526d940..c54baede4d68 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -36,15 +36,35 @@ /* DEVICE_CTRL */ #define PLL_UNLOCK (1 << 7) +#define AUXOUTDIS (1 << 1) +#define CLKOUTDIS (1 << 0) /* DEVICE_CFG1 */ #define RSEL(x) (((x) & 0x3) << 3) #define RSEL_MASK RSEL(0x3) #define ENDEV1 (0x1) +/* DEVICE_CFG2 */ +#define AUTORMOD (1 << 3) +#define LOCKCLK(x) (((x) & 0x3) << 1) +#define LOCKCLK_MASK LOCKCLK(0x3) +#define FRACNSRC_MASK (1 << 0) +#define FRACNSRC_STATIC (0 << 0) +#define FRACNSRC_DYNAMIC (1 << 1) + /* GLOBAL_CFG */ #define ENDEV2 (0x1) +/* FUNC_CFG1 */ +#define CLKSKIPEN (1 << 7) +#define REFCLKDIV(x) (((x) & 0x3) << 3) +#define REFCLKDIV_MASK REFCLKDIV(0x3) + +/* FUNC_CFG2 */ +#define LFRATIO_MASK (1 << 3) +#define LFRATIO_20_12 (0 << 3) +#define LFRATIO_12_20 (1 << 3) + #define CH_SIZE_ERR(ch) ((ch < 0) || (ch >= CH_MAX)) #define hw_to_priv(_hw) 
container_of(_hw, struct cs2000_priv, hw) #define priv_to_client(priv) (priv->client) @@ -110,6 +130,17 @@ static int cs2000_enable_dev_config(struct cs2000_priv *priv, bool enable) if (ret < 0) return ret; + ret = cs2000_bset(priv, FUNC_CFG1, CLKSKIPEN, + enable ? CLKSKIPEN : 0); + if (ret < 0) + return ret; + + /* FIXME: for Static ratio mode */ + ret = cs2000_bset(priv, FUNC_CFG2, LFRATIO_MASK, + LFRATIO_12_20); + if (ret < 0) + return ret; + return 0; } @@ -127,7 +158,9 @@ static int cs2000_clk_in_bound_rate(struct cs2000_priv *priv, else return -EINVAL; - return cs2000_bset(priv, FUNC_CFG1, 0x3 << 3, val << 3); + return cs2000_bset(priv, FUNC_CFG1, + REFCLKDIV_MASK, + REFCLKDIV(val)); } static int cs2000_wait_pll_lock(struct cs2000_priv *priv) @@ -153,7 +186,10 @@ static int cs2000_wait_pll_lock(struct cs2000_priv *priv) static int cs2000_clk_out_enable(struct cs2000_priv *priv, bool enable) { /* enable both AUX_OUT, CLK_OUT */ - return cs2000_write(priv, DEVICE_CTRL, enable ? 0 : 0x3); + return cs2000_bset(priv, DEVICE_CTRL, + (AUXOUTDIS | CLKOUTDIS), + enable ? 0 : + (AUXOUTDIS | CLKOUTDIS)); } static u32 cs2000_rate_to_ratio(u32 rate_in, u32 rate_out) @@ -243,7 +279,9 @@ static int cs2000_ratio_select(struct cs2000_priv *priv, int ch) if (ret < 0) return ret; - ret = cs2000_write(priv, DEVICE_CFG2, 0x0); + ret = cs2000_bset(priv, DEVICE_CFG2, + (AUTORMOD | LOCKCLK_MASK | FRACNSRC_MASK), + (LOCKCLK(ch) | FRACNSRC_STATIC)); if (ret < 0) return ret; @@ -351,8 +389,7 @@ static const struct clk_ops cs2000_ops = { static int cs2000_clk_get(struct cs2000_priv *priv) { - struct i2c_client *client = priv_to_client(priv); - struct device *dev = &client->dev; + struct device *dev = priv_to_dev(priv); struct clk *clk_in, *ref_clk; clk_in = devm_clk_get(dev, "clk_in"); @@ -420,8 +457,7 @@ static int cs2000_clk_register(struct cs2000_priv *priv) static int cs2000_version_print(struct cs2000_priv *priv) { - struct i2c_client *client = priv_to_client(priv); - struct device *dev = &client->dev; + struct device *dev = priv_to_dev(priv); s32 val; const char *revision; @@ -452,7 +488,7 @@ static int cs2000_version_print(struct cs2000_priv *priv) static int cs2000_remove(struct i2c_client *client) { struct cs2000_priv *priv = i2c_get_clientdata(client); - struct device *dev = &client->dev; + struct device *dev = priv_to_dev(priv); struct device_node *np = dev->of_node; of_clk_del_provider(np); diff --git a/drivers/clk/clk-hi655x.c b/drivers/clk/clk-hi655x.c new file mode 100644 index 000000000000..403a0188634a --- /dev/null +++ b/drivers/clk/clk-hi655x.c @@ -0,0 +1,126 @@ +/* + * Clock driver for Hi655x + * + * Copyright (c) 2017, Linaro Ltd. + * + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/mfd/core.h> +#include <linux/mfd/hi655x-pmic.h> + +#define HI655X_CLK_BASE HI655X_BUS_ADDR(0x1c) +#define HI655X_CLK_SET BIT(6) + +struct hi655x_clk { + struct hi655x_pmic *hi655x; + struct clk_hw clk_hw; +}; + +static unsigned long hi655x_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return 32768; +} + +static int hi655x_clk_enable(struct clk_hw *hw, bool enable) +{ + struct hi655x_clk *hi655x_clk = + container_of(hw, struct hi655x_clk, clk_hw); + + struct hi655x_pmic *hi655x = hi655x_clk->hi655x; + + return regmap_update_bits(hi655x->regmap, HI655X_CLK_BASE, + HI655X_CLK_SET, enable ? HI655X_CLK_SET : 0); +} + +static int hi655x_clk_prepare(struct clk_hw *hw) +{ + return hi655x_clk_enable(hw, true); +} + +static void hi655x_clk_unprepare(struct clk_hw *hw) +{ + hi655x_clk_enable(hw, false); +} + +static int hi655x_clk_is_prepared(struct clk_hw *hw) +{ + struct hi655x_clk *hi655x_clk = + container_of(hw, struct hi655x_clk, clk_hw); + struct hi655x_pmic *hi655x = hi655x_clk->hi655x; + int ret; + uint32_t val; + + ret = regmap_read(hi655x->regmap, HI655X_CLK_BASE, &val); + if (ret < 0) + return ret; + + return val & HI655X_CLK_BASE; +} + +static const struct clk_ops hi655x_clk_ops = { + .prepare = hi655x_clk_prepare, + .unprepare = hi655x_clk_unprepare, + .is_prepared = hi655x_clk_is_prepared, + .recalc_rate = hi655x_clk_recalc_rate, +}; + +static int hi655x_clk_probe(struct platform_device *pdev) +{ + struct device *parent = pdev->dev.parent; + struct hi655x_pmic *hi655x = dev_get_drvdata(parent); + struct hi655x_clk *hi655x_clk; + const char *clk_name = "hi655x-clk"; + struct clk_init_data init = { + .name = clk_name, + .ops = &hi655x_clk_ops + }; + int ret; + + hi655x_clk = devm_kzalloc(&pdev->dev, sizeof(*hi655x_clk), GFP_KERNEL); + if (!hi655x_clk) + return -ENOMEM; + + of_property_read_string_index(parent->of_node, "clock-output-names", + 0, &clk_name); + + hi655x_clk->clk_hw.init = &init; + hi655x_clk->hi655x = hi655x; + + platform_set_drvdata(pdev, hi655x_clk); + + ret = devm_clk_hw_register(&pdev->dev, &hi655x_clk->clk_hw); + if (ret) + return ret; + + return of_clk_add_hw_provider(parent->of_node, of_clk_hw_simple_get, + &hi655x_clk->clk_hw); +} + +static struct platform_driver hi655x_clk_driver = { + .probe = hi655x_clk_probe, + .driver = { + .name = "hi655x-clk", + }, +}; + +module_platform_driver(hi655x_clk_driver); + +MODULE_DESCRIPTION("Clk driver for the hi655x series PMICs"); +MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:hi655x-clk"); diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 71677eb12565..13ad6d1e5090 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -267,10 +267,8 @@ pll_clk_register(struct device *dev, const char *name, } pll = kzalloc(sizeof(*pll), GFP_KERNEL); - if (!pll) { - pr_err("%s: could not allocate PLL clk\n", __func__); + if (!pll) return ERR_PTR(-ENOMEM); - } init.name = name; init.ops = &pll_clk_ops; @@ -356,11 +354,9 @@ src_clk_register(struct device *dev, const char *name, struct clk_init_data init; sclk = kzalloc(sizeof(*sclk), GFP_KERNEL); - if (!sclk) { - pr_err("could not allocate SRC clock %s\n", - name); + if (!sclk) return ERR_PTR(-ENOMEM); - } + init.name = name; init.ops = &src_clk_ops; /* Do not force-disable the static SDRAM controller */ 
@@ -467,7 +463,7 @@ static int nomadik_src_clk_show(struct seq_file *s, void *what) u32 src_pckensr0 = readl(src_base + SRC_PCKENSR0); u32 src_pckensr1 = readl(src_base + SRC_PCKENSR1); - seq_printf(s, "Clock: Boot: Now: Request: ASKED:\n"); + seq_puts(s, "Clock: Boot: Now: Request: ASKED:\n"); for (i = 0; i < ARRAY_SIZE(src_clk_names); i++) { u32 pcksrb = (i < 0x20) ? src_pcksr0_boot : src_pcksr1_boot; u32 pcksr = (i < 0x20) ? src_pcksr0 : src_pcksr1; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index b051db43fae1..2492442eea77 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -1354,10 +1354,8 @@ static int si5351_i2c_probe(struct i2c_client *client, return -EINVAL; drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL); - if (drvdata == NULL) { - dev_err(&client->dev, "unable to allocate driver data\n"); + if (!drvdata) return -ENOMEM; - } i2c_set_clientdata(client, drvdata); drvdata->client = client; @@ -1535,9 +1533,9 @@ static int si5351_i2c_probe(struct i2c_client *client, else parent_names[1] = si5351_pll_names[1]; - drvdata->msynth = devm_kzalloc(&client->dev, num_clocks * + drvdata->msynth = devm_kcalloc(&client->dev, num_clocks, sizeof(*drvdata->msynth), GFP_KERNEL); - drvdata->clkout = devm_kzalloc(&client->dev, num_clocks * + drvdata->clkout = devm_kcalloc(&client->dev, num_clocks, sizeof(*drvdata->clkout), GFP_KERNEL); drvdata->num_clkout = num_clocks; diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index cf9449b3dbd9..68e2a4e499f1 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -531,19 +531,26 @@ static int stm32f4_pll_is_enabled(struct clk_hw *hw) return clk_gate_ops.is_enabled(hw); } +#define PLL_TIMEOUT 10000 + static int stm32f4_pll_enable(struct clk_hw *hw) { struct clk_gate *gate = to_clk_gate(hw); struct stm32f4_pll *pll = to_stm32f4_pll(gate); - int ret = 0; - unsigned long reg; + int bit_status; + unsigned int timeout = PLL_TIMEOUT; - ret = clk_gate_ops.enable(hw); + if (clk_gate_ops.is_enabled(hw)) + return 0; + + clk_gate_ops.enable(hw); - ret = readl_relaxed_poll_timeout_atomic(base + STM32F4_RCC_CR, reg, - reg & (1 << pll->bit_rdy_idx), 0, 10000); + do { + bit_status = !(readl(gate->reg) & BIT(pll->bit_rdy_idx)); - return ret; + } while (bit_status && --timeout); + + return bit_status; } static void stm32f4_pll_disable(struct clk_hw *hw) @@ -834,24 +841,32 @@ struct stm32_rgate { u8 bit_rdy_idx; }; -#define RTC_TIMEOUT 1000000 +#define RGATE_TIMEOUT 50000 static int rgclk_enable(struct clk_hw *hw) { struct clk_gate *gate = to_clk_gate(hw); struct stm32_rgate *rgate = to_rgclk(gate); - u32 reg; - int ret; + int bit_status; + unsigned int timeout = RGATE_TIMEOUT; + + if (clk_gate_ops.is_enabled(hw)) + return 0; disable_power_domain_write_protection(); clk_gate_ops.enable(hw); - ret = readl_relaxed_poll_timeout_atomic(gate->reg, reg, - reg & rgate->bit_rdy_idx, 1000, RTC_TIMEOUT); + do { + bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy_idx)); + if (bit_status) + udelay(100); + + } while (bit_status && --timeout); enable_power_domain_write_protection(); - return ret; + + return bit_status; } static void rgclk_disable(struct clk_hw *hw) @@ -1533,7 +1548,7 @@ static void __init stm32f4_rcc_init(struct device_node *np) } clks[CLK_LSI] = clk_register_rgate(NULL, "lsi", "clk-lsi", 0, - base + STM32F4_RCC_CSR, 0, 2, 0, &stm32f4_clk_lock); + base + STM32F4_RCC_CSR, 0, 1, 0, &stm32f4_clk_lock); if (IS_ERR(clks[CLK_LSI])) { pr_err("Unable to register lsi clock\n"); @@ 
-1541,7 +1556,7 @@ static void __init stm32f4_rcc_init(struct device_node *np) } clks[CLK_LSE] = clk_register_rgate(NULL, "lse", "clk-lse", 0, - base + STM32F4_RCC_BDCR, 0, 2, 0, &stm32f4_clk_lock); + base + STM32F4_RCC_BDCR, 0, 1, 0, &stm32f4_clk_lock); if (IS_ERR(clks[CLK_LSE])) { pr_err("Unable to register lse clock\n"); diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 56741f3cf0a3..ea7d552a2f2b 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -113,10 +113,29 @@ #define VC5_MUX_IN_XIN BIT(0) #define VC5_MUX_IN_CLKIN BIT(1) +/* Maximum number of clk_out supported by this driver */ +#define VC5_MAX_CLK_OUT_NUM 5 + +/* Maximum number of FODs supported by this driver */ +#define VC5_MAX_FOD_NUM 4 + +/* flags to describe chip features */ +/* chip has built-in oscilator */ +#define VC5_HAS_INTERNAL_XTAL BIT(0) + /* Supported IDT VC5 models. */ enum vc5_model { IDT_VC5_5P49V5923, IDT_VC5_5P49V5933, + IDT_VC5_5P49V5935, +}; + +/* Structure to describe features of a particular VC5 model */ +struct vc5_chip_info { + const enum vc5_model model; + const unsigned int clk_fod_cnt; + const unsigned int clk_out_cnt; + const u32 flags; }; struct vc5_driver_data; @@ -132,15 +151,15 @@ struct vc5_hw_data { struct vc5_driver_data { struct i2c_client *client; struct regmap *regmap; - enum vc5_model model; + const struct vc5_chip_info *chip_info; struct clk *pin_xin; struct clk *pin_clkin; unsigned char clk_mux_ins; struct clk_hw clk_mux; struct vc5_hw_data clk_pll; - struct vc5_hw_data clk_fod[2]; - struct vc5_hw_data clk_out[3]; + struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM]; + struct vc5_hw_data clk_out[VC5_MAX_CLK_OUT_NUM]; }; static const char * const vc5_mux_names[] = { @@ -563,7 +582,7 @@ static struct clk_hw *vc5_of_clk_get(struct of_phandle_args *clkspec, struct vc5_driver_data *vc5 = data; unsigned int idx = clkspec->args[0]; - if (idx > 2) + if (idx >= vc5->chip_info->clk_out_cnt) return ERR_PTR(-EINVAL); return &vc5->clk_out[idx].hw; @@ -576,6 +595,7 @@ static int vc5_map_index_to_output(const enum vc5_model model, case IDT_VC5_5P49V5933: return (n == 0) ? 0 : 3; case IDT_VC5_5P49V5923: + case IDT_VC5_5P49V5935: default: return n; } @@ -586,12 +606,10 @@ static const struct of_device_id clk_vc5_of_match[]; static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) { - const struct of_device_id *of_id = - of_match_device(clk_vc5_of_match, &client->dev); struct vc5_driver_data *vc5; struct clk_init_data init; const char *parent_names[2]; - unsigned int n, idx; + unsigned int n, idx = 0; int ret; vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL); @@ -600,7 +618,7 @@ static int vc5_probe(struct i2c_client *client, i2c_set_clientdata(client, vc5); vc5->client = client; - vc5->model = (enum vc5_model)of_id->data; + vc5->chip_info = of_device_get_match_data(&client->dev); vc5->pin_xin = devm_clk_get(&client->dev, "xin"); if (PTR_ERR(vc5->pin_xin) == -EPROBE_DEFER) @@ -622,8 +640,7 @@ static int vc5_probe(struct i2c_client *client, if (!IS_ERR(vc5->pin_xin)) { vc5->clk_mux_ins |= VC5_MUX_IN_XIN; parent_names[init.num_parents++] = __clk_get_name(vc5->pin_xin); - } else if (vc5->model == IDT_VC5_5P49V5933) { - /* IDT VC5 5P49V5933 has built-in oscilator. 
*/ + } else if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) { vc5->pin_xin = clk_register_fixed_rate(&client->dev, "internal-xtal", NULL, 0, 25000000); @@ -672,8 +689,8 @@ static int vc5_probe(struct i2c_client *client, } /* Register FODs */ - for (n = 0; n < 2; n++) { - idx = vc5_map_index_to_output(vc5->model, n); + for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) { + idx = vc5_map_index_to_output(vc5->chip_info->model, n); memset(&init, 0, sizeof(init)); init.name = vc5_fod_names[idx]; init.ops = &vc5_fod_ops; @@ -709,8 +726,8 @@ static int vc5_probe(struct i2c_client *client, } /* Register FOD-connected OUTx outputs */ - for (n = 1; n < 3; n++) { - idx = vc5_map_index_to_output(vc5->model, n - 1); + for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) { + idx = vc5_map_index_to_output(vc5->chip_info->model, n - 1); parent_names[0] = vc5_fod_names[idx]; if (n == 1) parent_names[1] = vc5_mux_names[0]; @@ -744,7 +761,7 @@ static int vc5_probe(struct i2c_client *client, return 0; err_clk: - if (vc5->model == IDT_VC5_5P49V5933) + if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) clk_unregister_fixed_rate(vc5->pin_xin); return ret; } @@ -755,22 +772,45 @@ static int vc5_remove(struct i2c_client *client) of_clk_del_provider(client->dev.of_node); - if (vc5->model == IDT_VC5_5P49V5933) + if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) clk_unregister_fixed_rate(vc5->pin_xin); return 0; } +static const struct vc5_chip_info idt_5p49v5923_info = { + .model = IDT_VC5_5P49V5923, + .clk_fod_cnt = 2, + .clk_out_cnt = 3, + .flags = 0, +}; + +static const struct vc5_chip_info idt_5p49v5933_info = { + .model = IDT_VC5_5P49V5933, + .clk_fod_cnt = 2, + .clk_out_cnt = 3, + .flags = VC5_HAS_INTERNAL_XTAL, +}; + +static const struct vc5_chip_info idt_5p49v5935_info = { + .model = IDT_VC5_5P49V5935, + .clk_fod_cnt = 4, + .clk_out_cnt = 5, + .flags = VC5_HAS_INTERNAL_XTAL, +}; + static const struct i2c_device_id vc5_id[] = { { "5p49v5923", .driver_data = IDT_VC5_5P49V5923 }, { "5p49v5933", .driver_data = IDT_VC5_5P49V5933 }, + { "5p49v5935", .driver_data = IDT_VC5_5P49V5935 }, { } }; MODULE_DEVICE_TABLE(i2c, vc5_id); static const struct of_device_id clk_vc5_of_match[] = { - { .compatible = "idt,5p49v5923", .data = (void *)IDT_VC5_5P49V5923 }, - { .compatible = "idt,5p49v5933", .data = (void *)IDT_VC5_5P49V5933 }, + { .compatible = "idt,5p49v5923", .data = &idt_5p49v5923_info }, + { .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info }, + { .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info }, { }, }; MODULE_DEVICE_TABLE(of, clk_vc5_of_match); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 67201f67a14a..fc58c52a26b4 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -966,6 +966,8 @@ static int __clk_notify(struct clk_core *core, unsigned long msg, cnd.clk = cn->clk; ret = srcu_notifier_call_chain(&cn->notifier_head, msg, &cnd); + if (ret & NOTIFY_STOP_MASK) + return ret; } } @@ -2081,11 +2083,11 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) clk_dump_one(s, c, level); hlist_for_each_entry(child, &c->children, child_node) { - seq_printf(s, ","); + seq_putc(s, ','); clk_dump_subtree(s, child, level + 1); } - seq_printf(s, "}"); + seq_putc(s, '}'); } static int clk_dump(struct seq_file *s, void *data) @@ -2094,14 +2096,13 @@ static int clk_dump(struct seq_file *s, void *data) bool first_node = true; struct hlist_head **lists = (struct hlist_head **)s->private; - seq_printf(s, "{"); - + seq_putc(s, '{'); clk_prepare_lock(); for (; *lists; 
lists++) { hlist_for_each_entry(c, *lists, child_node) { if (!first_node) - seq_puts(s, ","); + seq_putc(s, ','); first_node = false; clk_dump_subtree(s, c, 0); } @@ -2126,6 +2127,31 @@ static const struct file_operations clk_dump_fops = { .release = single_release, }; +static int possible_parents_dump(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + int i; + + for (i = 0; i < core->num_parents - 1; i++) + seq_printf(s, "%s ", core->parent_names[i]); + + seq_printf(s, "%s\n", core->parent_names[i]); + + return 0; +} + +static int possible_parents_open(struct inode *inode, struct file *file) +{ + return single_open(file, possible_parents_dump, inode->i_private); +} + +static const struct file_operations possible_parents_fops = { + .open = possible_parents_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) { struct dentry *d; @@ -2177,6 +2203,13 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; + if (core->num_parents > 1) { + d = debugfs_create_file("clk_possible_parents", S_IRUGO, + core->dentry, core, &possible_parents_fops); + if (!d) + goto err_out; + } + if (core->ops->debug_init) { ret = core->ops->debug_init(core->hw, core->dentry); if (ret) @@ -2940,7 +2973,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) /* if clk wasn't in the notifier list, allocate new clk_notifier */ if (cn->clk != clk) { - cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL); + cn = kzalloc(sizeof(*cn), GFP_KERNEL); if (!cn) goto out; @@ -3088,7 +3121,7 @@ int of_clk_add_provider(struct device_node *np, struct of_clk_provider *cp; int ret; - cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL); + cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) return -ENOMEM; diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c index d04a104ce1b4..fa0fba653898 100644 --- a/drivers/clk/hisilicon/clk-hi3620.c +++ b/drivers/clk/hisilicon/clk-hi3620.c @@ -144,7 +144,7 @@ static struct hisi_divider_clock hi3620_div_clks[] __initdata = { { HI3620_MMC3_DIV, "mmc3_div", "mmc3_mux", 0, 0x140, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, }, }; -static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = { +static struct hisi_gate_clock hi3620_separated_gate_clks[] __initdata = { { HI3620_TIMERCLK01, "timerclk01", "timer_rclk01", CLK_SET_RATE_PARENT, 0x20, 0, 0, }, { HI3620_TIMER_RCLK01, "timer_rclk01", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 1, 0, }, { HI3620_TIMERCLK23, "timerclk23", "timer_rclk23", CLK_SET_RATE_PARENT, 0x20, 2, 0, }, @@ -224,8 +224,8 @@ static void __init hi3620_clk_init(struct device_node *np) clk_data); hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks), clk_data); - hisi_clk_register_gate_sep(hi3620_seperated_gate_clks, - ARRAY_SIZE(hi3620_seperated_gate_clks), + hisi_clk_register_gate_sep(hi3620_separated_gate_clks, + ARRAY_SIZE(hi3620_separated_gate_clks), clk_data); } CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init); @@ -430,10 +430,8 @@ static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk, struct clk_init_data init; mclk = kzalloc(sizeof(*mclk), GFP_KERNEL); - if (!mclk) { - pr_err("%s: fail to allocate mmc clk\n", __func__); + if (!mclk) return ERR_PTR(-ENOMEM); - } init.name = mmc_clk->name; init.ops = &clk_mmc_ops; @@ -482,11 +480,9 @@ static void __init hi3620_mmc_clk_init(struct device_node 
*node) if (WARN_ON(!clk_data)) return; - clk_data->clks = kzalloc(sizeof(struct clk *) * num, GFP_KERNEL); - if (!clk_data->clks) { - pr_err("%s: fail to allocate mmc clk\n", __func__); + clk_data->clks = kcalloc(num, sizeof(*clk_data->clks), GFP_KERNEL); + if (!clk_data->clks) return; - } for (i = 0; i < num; i++) { struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i]; diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index c0e8e1f196aa..2ae151ce623a 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -134,6 +134,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { { HI6220_UART4_PCLK, "uart4_pclk", "uart4_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 8, 0, }, { HI6220_SPI_CLK, "spi_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 9, 0, }, { HI6220_TSENSOR_CLK, "tsensor_clk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 12, 0, }, + { HI6220_DAPB_CLK, "dapb_clk", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x230, 18, 0, }, { HI6220_MMU_CLK, "mmu_clk", "ddrc_axi1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x240, 11, 0, }, { HI6220_HIFI_SEL, "hifi_sel", "hifi_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 0, 0, }, { HI6220_MMC0_SYSPLL, "mmc0_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 1, 0, }, diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c index 9ba2d91f4d3a..b73c1dfae7f1 100644 --- a/drivers/clk/hisilicon/clk.c +++ b/drivers/clk/hisilicon/clk.c @@ -54,8 +54,9 @@ struct hisi_clock_data *hisi_clk_alloc(struct platform_device *pdev, if (!clk_data->base) return NULL; - clk_table = devm_kmalloc(&pdev->dev, sizeof(struct clk *) * nr_clks, - GFP_KERNEL); + clk_table = devm_kmalloc_array(&pdev->dev, nr_clks, + sizeof(*clk_table), + GFP_KERNEL); if (!clk_table) return NULL; @@ -80,17 +81,14 @@ struct hisi_clock_data *hisi_clk_init(struct device_node *np, } clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); - if (!clk_data) { - pr_err("%s: could not allocate clock data\n", __func__); + if (!clk_data) goto err; - } - clk_data->base = base; - clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); - if (!clk_table) { - pr_err("%s: could not allocate clock lookup table\n", __func__); + clk_data->base = base; + clk_table = kcalloc(nr_clks, sizeof(*clk_table), GFP_KERNEL); + if (!clk_table) goto err_data; - } + clk_data->clk_data.clks = clk_table; clk_data->clk_data.clk_num = nr_clks; of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data); diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 75c35fb12b60..b4e0dff3c8c2 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -73,7 +73,7 @@ static struct clk *clks[IMX6UL_CLK_END]; static struct clk_onecell_data clk_data; static int const clks_init_on[] __initconst = { - IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2, IMX6UL_CLK_AIPSTZ3, + IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2, IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM, IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG, }; @@ -341,9 +341,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26); clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28); clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28); - if (clk_on_imx6ul()) - clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base 
+ 0x68, 30); - else if (clk_on_imx6ull()) + if (clk_on_imx6ull()) clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18); /* CCGR1 */ @@ -360,7 +358,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clks[IMX6UL_CLK_GPT1_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20); clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22); clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24); - clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24); + clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); /* CCGR2 */ if (clk_on_imx6ull()) { @@ -482,6 +480,9 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clks[clks_init_on[i]]); + if (clk_on_imx6ull()) + clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]); + if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { clk_prepare_enable(clks[IMX6UL_CLK_USBPHY1_GATE]); clk_prepare_enable(clks[IMX6UL_CLK_USBPHY2_GATE]); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index ae1d31be906e..93b03640da9b 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -386,7 +386,7 @@ static int const clks_init_on[] __initconst = { IMX7D_PLL_SYS_MAIN_480M_CLK, IMX7D_NAND_USDHC_BUS_ROOT_CLK, IMX7D_DRAM_PHYM_ROOT_CLK, IMX7D_DRAM_ROOT_CLK, IMX7D_DRAM_PHYM_ALT_ROOT_CLK, IMX7D_DRAM_ALT_ROOT_CLK, - IMX7D_AHB_CHANNEL_ROOT_CLK, + IMX7D_AHB_CHANNEL_ROOT_CLK, IMX7D_IPG_ROOT_CLK, }; static struct clk_onecell_data clk_data; @@ -724,8 +724,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6); clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6); clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); - clks[IMX7D_NAND_USDHC_BUS_ROOT_DIV] = imx_clk_divider2("nand_usdhc_post_div", "nand_usdhc_pre_div", base + 0x8980, 0, 6); - clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_post_div", "ahb_pre_div", base + 0x9000, 0, 6); + clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6); + clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6); + clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider2("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2); clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3); clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); @@ -796,9 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0); - clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_post_div", base + 0x4120, 0); - clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_gate4("nand_usdhc_root_clk", "nand_usdhc_post_div", base + 0x4130, 0); - 
clks[IMX7D_AHB_CHANNEL_ROOT_CLK] = imx_clk_gate4("ahb_root_clk", "ahb_post_div", base + 0x4200, 0); + clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0); diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index a01ef7806aed..28739a9a6e37 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -50,6 +50,38 @@ config COMMON_CLK_MT2701_BDPSYS ---help--- This driver supports Mediatek MT2701 bdpsys clocks. +config COMMON_CLK_MT6797 + bool "Clock driver for Mediatek MT6797" + depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK && ARM64 + ---help--- + This driver supports Mediatek MT6797 basic clocks. + +config COMMON_CLK_MT6797_MMSYS + bool "Clock driver for Mediatek MT6797 mmsys" + depends on COMMON_CLK_MT6797 + ---help--- + This driver supports Mediatek MT6797 mmsys clocks. + +config COMMON_CLK_MT6797_IMGSYS + bool "Clock driver for Mediatek MT6797 imgsys" + depends on COMMON_CLK_MT6797 + ---help--- + This driver supports Mediatek MT6797 imgsys clocks. + +config COMMON_CLK_MT6797_VDECSYS + bool "Clock driver for Mediatek MT6797 vdecsys" + depends on COMMON_CLK_MT6797 + ---help--- + This driver supports Mediatek MT6797 vdecsys clocks. + +config COMMON_CLK_MT6797_VENCSYS + bool "Clock driver for Mediatek MT6797 vencsys" + depends on COMMON_CLK_MT6797 + ---help--- + This driver supports Mediatek MT6797 vencsys clocks. + config COMMON_CLK_MT8135 bool "Clock driver for Mediatek MT8135" depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index 19ae7ef79b57..5c3afb86b9ec 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -1,5 +1,10 @@ obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o obj-$(CONFIG_RESET_CONTROLLER) += reset.o +obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o +obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o +obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o +obj-$(CONFIG_COMMON_CLK_MT6797_VDECSYS) += clk-mt6797-vdec.o +obj-$(CONFIG_COMMON_CLK_MT6797_VENCSYS) += clk-mt6797-venc.o obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c index 877be8715afa..9251a6551522 100644 --- a/drivers/clk/mediatek/clk-mt2701-eth.c +++ b/drivers/clk/mediatek/clk-mt2701-eth.c @@ -66,6 +66,8 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev) "could not register clock provider: %s: %d\n", pdev->name, r); + mtk_register_reset_controller(node, 1, 0x34); + return r; } diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c new file mode 100644 index 000000000000..94cc48065918 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6797-img.c @@ -0,0 +1,76 @@ +/* Copyright (c) 2017 MediaTek Inc. 
+ * Author: Kevin Chen <kevin-cw.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt6797-clk.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs img_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_IMG(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &img_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate img_clks[] = { + GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_sel", 11), + GATE_IMG(CLK_IMG_DPE, "img_dpe", "mm_sel", 10), + GATE_IMG(CLK_IMG_DIP, "img_dip", "mm_sel", 6), + GATE_IMG(CLK_IMG_LARB6, "img_larb6", "mm_sel", 0), +}; + +static const struct of_device_id of_match_clk_mt6797_img[] = { + { .compatible = "mediatek,mt6797-imgsys", }, + {} +}; + +static int clk_mt6797_img_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IMG_NR); + + mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6797_img_drv = { + .probe = clk_mt6797_img_probe, + .driver = { + .name = "clk-mt6797-img", + .of_match_table = of_match_clk_mt6797_img, + }, +}; + +builtin_platform_driver(clk_mt6797_img_drv); diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c new file mode 100644 index 000000000000..c57d3eed270d --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6797-mm.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Kevin Chen <kevin-cw.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt6797-clk.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs mm0_cg_regs = { + .set_ofs = 0x0104, + .clr_ofs = 0x0108, + .sta_ofs = 0x0100, +}; + +static const struct mtk_gate_regs mm1_cg_regs = { + .set_ofs = 0x0114, + .clr_ofs = 0x0118, + .sta_ofs = 0x0110, +}; + +#define GATE_MM0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &mm0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ +} + +#define GATE_MM1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &mm1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ +} + +static const struct mtk_gate mm_clks[] = { + GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0), + GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1), + GATE_MM0(CLK_MM_SMI_LARB5, "mm_smi_larb5", "mm_sel", 2), + GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 3), + GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 4), + GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 5), + GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 6), + GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 7), + GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 8), + GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 9), + GATE_MM0(CLK_MM_MDP_COLOR, "mm_mdp_color", "mm_sel", 10), + GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11), + GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12), + GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13), + GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14), + GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 15), + GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 16), + GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 17), + GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", "mm_sel", 18), + GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 19), + GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 20), + GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21), + GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22), + GATE_MM0(CLK_MM_DISP_COLOR, "mm_disp_color", "mm_sel", 23), + GATE_MM0(CLK_MM_DISP_CCORR, "mm_disp_ccorr", "mm_sel", 24), + GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25), + GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26), + GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 27), + GATE_MM0(CLK_MM_DISP_DITHER, "mm_disp_dither", "mm_sel", 28), + GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 29), + GATE_MM0(CLK_MM_DISP_DSC, "mm_disp_dsc", "mm_sel", 30), + GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31), + GATE_MM1(CLK_MM_DSI0_MM_CLOCK, "mm_dsi0_mm_clock", "mm_sel", 0), + GATE_MM1(CLK_MM_DSI1_MM_CLOCK, "mm_dsi1_mm_clock", "mm_sel", 2), + GATE_MM1(CLK_MM_DPI_MM_CLOCK, "mm_dpi_mm_clock", "mm_sel", 4), + GATE_MM1(CLK_MM_DPI_INTERFACE_CLOCK, "mm_dpi_interface_clock", + "dpi0_sel", 5), + GATE_MM1(CLK_MM_LARB4_AXI_ASIF_MM_CLOCK, "mm_larb4_axi_asif_mm_clock", + "mm_sel", 6), + GATE_MM1(CLK_MM_LARB4_AXI_ASIF_MJC_CLOCK, "mm_larb4_axi_asif_mjc_clock", + "mjc_sel", 7), + GATE_MM1(CLK_MM_DISP_OVL0_MOUT_CLOCK, "mm_disp_ovl0_mout_clock", + "mm_sel", 8), + GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 9), + GATE_MM1(CLK_MM_DSI0_INTERFACE_CLOCK, "mm_dsi0_interface_clock", + "clk26m", 1), + GATE_MM1(CLK_MM_DSI1_INTERFACE_CLOCK, "mm_dsi1_interface_clock", + "clk26m", 3), +}; + 
+static const struct of_device_id of_match_clk_mt6797_mm[] = { + { .compatible = "mediatek,mt6797-mmsys", }, + {} +}; + +static int clk_mt6797_mm_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MM_NR); + + mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6797_mm_drv = { + .probe = clk_mt6797_mm_probe, + .driver = { + .name = "clk-mt6797-mm", + .of_match_table = of_match_clk_mt6797_mm, + }, +}; + +builtin_platform_driver(clk_mt6797_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c new file mode 100644 index 000000000000..7c402ca6c0b2 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6797-vdec.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Kevin-CW Chen <kevin-cw.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt6797-clk.h> + +static const struct mtk_gate_regs vdec0_cg_regs = { + .set_ofs = 0x0000, + .clr_ofs = 0x0004, + .sta_ofs = 0x0000, +}; + +static const struct mtk_gate_regs vdec1_cg_regs = { + .set_ofs = 0x0008, + .clr_ofs = 0x000c, + .sta_ofs = 0x0008, +}; + +#define GATE_VDEC0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &vdec0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ +} + +#define GATE_VDEC1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &vdec1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ +} + +static const struct mtk_gate vdec_clks[] = { + GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "vdec_sel", 8), + GATE_VDEC0(CLK_VDEC_ACTIVE, "vdec_active", "vdec_sel", 4), + GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0), + GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "mm_sel", 0), +}; + +static const struct of_device_id of_match_clk_mt6797_vdec[] = { + { .compatible = "mediatek,mt6797-vdecsys", }, + {} +}; + +static int clk_mt6797_vdec_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VDEC_NR); + + mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6797_vdec_drv = { + .probe = clk_mt6797_vdec_probe, + .driver = { + .name = "clk-mt6797-vdec", + .of_match_table = of_match_clk_mt6797_vdec, + }, +}; + +builtin_platform_driver(clk_mt6797_vdec_drv); diff 
--git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c new file mode 100644 index 000000000000..e73d51756f13 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6797-venc.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Kevin Chen <kevin-cw.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt6797-clk.h> + +static const struct mtk_gate_regs venc_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_VENC(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &venc_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ + } + +static const struct mtk_gate venc_clks[] = { + GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0), + GATE_VENC(CLK_VENC_1, "venc_1", "venc_sel", 4), + GATE_VENC(CLK_VENC_2, "venc_2", "venc_sel", 8), + GATE_VENC(CLK_VENC_3, "venc_3", "venc_sel", 12), +}; + +static const struct of_device_id of_match_clk_mt6797_venc[] = { + { .compatible = "mediatek,mt6797-vencsys", }, + {} +}; + +static int clk_mt6797_venc_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VENC_NR); + + mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6797_venc_drv = { + .probe = clk_mt6797_venc_probe, + .driver = { + .name = "clk-mt6797-venc", + .of_match_table = of_match_clk_mt6797_venc, + }, +}; + +builtin_platform_driver(clk_mt6797_venc_drv); diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c new file mode 100644 index 000000000000..5702bc974ed9 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6797.c @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2016 MediaTek Inc. + * Author: Kevin Chen <kevin-cw.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt6797-clk.h> + +/* + * For some clocks, we don't care what their actual rates are. And these + * clocks may change their rate on different products or different scenarios. + * So we model these clocks' rate as 0, to denote it's not an actual rate. 
+ */ + +static DEFINE_SPINLOCK(mt6797_clk_lock); + +static const struct mtk_fixed_factor top_fixed_divs[] = { + FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, 1), + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4), + FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8), + FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16), + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_SYSPLL_D3_D3, "syspll_d3_d3", "syspll_d3", 1, 3), + FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2), + FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4), + FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2), + FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4), + FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7), + FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2), + FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_CK, "univpll_ck", "univpll", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7), + FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26), + FACTOR(CLK_TOP_SSUSB_PHY_48M_CK, "ssusb_phy_48m_ck", "univpll", 1, 1), + FACTOR(CLK_TOP_USB_PHY48M_CK, "usb_phy48m_ck", "univpll", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4), + FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 4), + FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2), + FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4), + FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1, 8), + FACTOR(CLK_TOP_ULPOSC_CK_ORG, "ulposc_ck_org", "ulposc", 1, 1), + FACTOR(CLK_TOP_ULPOSC_CK, "ulposc_ck", "ulposc_ck_org", 1, 3), + FACTOR(CLK_TOP_ULPOSC_D2, "ulposc_d2", "ulposc_ck", 1, 2), + FACTOR(CLK_TOP_ULPOSC_D3, "ulposc_d3", "ulposc_ck", 1, 4), + FACTOR(CLK_TOP_ULPOSC_D4, "ulposc_d4", "ulposc_ck", 1, 8), + FACTOR(CLK_TOP_ULPOSC_D8, "ulposc_d8", "ulposc_ck", 1, 10), + FACTOR(CLK_TOP_ULPOSC_D10, "ulposc_d10", "ulposc_ck_org", 1, 1), + FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1, 1), + FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1, 1), + FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1, 1), + FACTOR(CLK_TOP_MFGPLL_D2, "mfgpll_d2", "mfgpll_ck", 1, 2), + FACTOR(CLK_TOP_IMGPLL_CK, "imgpll_ck", "imgpll", 1, 1), + FACTOR(CLK_TOP_IMGPLL_D2, "imgpll_d2", "imgpll_ck", 1, 2), + FACTOR(CLK_TOP_IMGPLL_D4, "imgpll_d4", "imgpll_ck", 1, 4), + FACTOR(CLK_TOP_CODECPLL_CK, "codecpll_ck", "codecpll", 1, 1), + FACTOR(CLK_TOP_CODECPLL_D2, "codecpll_d2", "codecpll_ck", 1, 2), + FACTOR(CLK_TOP_VDECPLL_CK, "vdecpll_ck", "vdecpll", 1, 1), + FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1, 1), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1, 4), + FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1, 8), + 
FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_ck", 1, 16), + FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1, 1), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1, 2), + FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1, 4), + FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll_ck", 1, 8), +}; + +static const char * const axi_parents[] = { + "clk26m", + "syspll_d7", + "ulposc_axi_ck_mux", +}; + +static const char * const ulposc_axi_ck_mux_parents[] = { + "syspll1_d4", + "ulposc_axi_ck_mux_pre", +}; + +static const char * const ulposc_axi_ck_mux_pre_parents[] = { + "ulposc_d2", + "ulposc_d3", +}; + +static const char * const ddrphycfg_parents[] = { + "clk26m", + "syspll3_d2", + "syspll2_d4", + "syspll1_d8", +}; + +static const char * const mm_parents[] = { + "clk26m", + "imgpll_ck", + "univpll1_d2", + "syspll1_d2", +}; + +static const char * const pwm_parents[] = { + "clk26m", + "univpll2_d4", + "ulposc_d2", + "ulposc_d3", + "ulposc_d8", + "ulposc_d10", + "ulposc_d4", +}; + +static const char * const vdec_parents[] = { + "clk26m", + "vdecpll_ck", + "imgpll_ck", + "syspll_d3", + "univpll_d5", + "clk26m", + "clk26m", +}; + +static const char * const venc_parents[] = { + "clk26m", + "codecpll_ck", + "syspll_d3", +}; + +static const char * const mfg_parents[] = { + "clk26m", + "mfgpll_ck", + "syspll_d3", + "univpll_d3", +}; + +static const char * const camtg[] = { + "clk26m", + "univpll_d26", + "univpll2_d2", +}; + +static const char * const uart_parents[] = { + "clk26m", + "univpll2_d8", +}; + +static const char * const spi_parents[] = { + "clk26m", + "syspll3_d2", + "syspll2_d4", + "ulposc_spi_ck_mux", +}; + +static const char * const ulposc_spi_ck_mux_parents[] = { + "ulposc_d2", + "ulposc_d3", +}; + +static const char * const usb20_parents[] = { + "clk26m", + "univpll1_d8", + "syspll4_d2", +}; + +static const char * const msdc50_0_hclk_parents[] = { + "clk26m", + "syspll1_d2", + "syspll2_d2", + "syspll4_d2", +}; + +static const char * const msdc50_0_parents[] = { + "clk26m", + "msdcpll", + "syspll_d3", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "msdcpll_d2", + "univpll1_d2", + "univpll_d3", +}; + +static const char * const msdc30_1_parents[] = { + "clk26m", + "univpll2_d2", + "msdcpll_d2", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "univpll_d7", +}; + +static const char * const msdc30_2_parents[] = { + "clk26m", + "univpll2_d8", + "syspll2_d8", + "syspll1_d8", + "msdcpll_d8", + "syspll3_d4", + "univpll_d26", +}; + +static const char * const audio_parents[] = { + "clk26m", + "syspll3_d4", + "syspll4_d4", + "syspll1_d16", +}; + +static const char * const aud_intbus_parents[] = { + "clk26m", + "syspll1_d4", + "syspll4_d2", +}; + +static const char * const pmicspi_parents[] = { + "clk26m", + "univpll_d26", + "syspll3_d4", + "syspll1_d8", + "ulposc_d4", + "ulposc_d8", + "syspll2_d8", +}; + +static const char * const scp_parents[] = { + "clk26m", + "syspll_d3", + "ulposc_ck", + "univpll_d5", +}; + +static const char * const atb_parents[] = { + "clk26m", + "syspll1_d2", + "syspll_d5", +}; + +static const char * const mjc_parents[] = { + "clk26m", + "imgpll_ck", + "univpll_d5", + "syspll1_d2", +}; + +static const char * const dpi0_parents[] = { + "clk26m", + "tvdpll_d2", + "tvdpll_d4", + "tvdpll_d8", + "tvdpll_d16", + "clk26m", + "clk26m", +}; + +static const char * const aud_1_parents[] = { + "clk26m", + "apll1_ck", +}; + +static const char * const aud_2_parents[] = { + "clk26m", + "apll2_ck", +}; + +static const char * const ssusb_top_sys_parents[] = { + 
"clk26m", + "univpll3_d2", +}; + +static const char * const spm_parents[] = { + "clk26m", + "syspll1_d8", +}; + +static const char * const bsi_spi_parents[] = { + "clk26m", + "syspll_d3_d3", + "syspll1_d4", + "syspll_d7", +}; + +static const char * const audio_h_parents[] = { + "clk26m", + "apll2_ck", + "apll1_ck", + "univpll_d7", +}; + +static const char * const mfg_52m_parents[] = { + "clk26m", + "univpll2_d8", + "univpll2_d4", + "univpll2_d4", +}; + +static const char * const anc_md32_parents[] = { + "clk26m", + "syspll1_d2", + "univpll_d5", +}; + +static const struct mtk_composite top_muxes[] = { + MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE, "ulposc_axi_ck_mux_pre", + ulposc_axi_ck_mux_pre_parents, 0x0040, 3, 1), + MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX, "ulposc_axi_ck_mux", + ulposc_axi_ck_mux_parents, 0x0040, 2, 1), + MUX(CLK_TOP_MUX_AXI, "axi_sel", axi_parents, + 0x0040, 0, 2), + MUX(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents, + 0x0040, 16, 2), + MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents, + 0x0040, 24, 2), + MUX_GATE(CLK_TOP_MUX_PWM, "pwm_sel", pwm_parents, 0x0050, 0, 3, 7), + MUX_GATE(CLK_TOP_MUX_VDEC, "vdec_sel", vdec_parents, 0x0050, 8, 3, 15), + MUX_GATE(CLK_TOP_MUX_VENC, "venc_sel", venc_parents, 0x0050, 16, 2, 23), + MUX_GATE(CLK_TOP_MUX_MFG, "mfg_sel", mfg_parents, 0x0050, 24, 2, 31), + MUX_GATE(CLK_TOP_MUX_CAMTG, "camtg_sel", camtg, 0x0060, 0, 2, 7), + MUX_GATE(CLK_TOP_MUX_UART, "uart_sel", uart_parents, 0x0060, 8, 1, 15), + MUX_GATE(CLK_TOP_MUX_SPI, "spi_sel", spi_parents, 0x0060, 16, 2, 23), + MUX(CLK_TOP_MUX_ULPOSC_SPI_CK_MUX, "ulposc_spi_ck_mux", + ulposc_spi_ck_mux_parents, 0x0060, 18, 1), + MUX_GATE(CLK_TOP_MUX_USB20, "usb20_sel", usb20_parents, + 0x0060, 24, 2, 31), + MUX(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_0_hclk_sel", + msdc50_0_hclk_parents, 0x0070, 8, 2), + MUX_GATE(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel", msdc50_0_parents, + 0x0070, 16, 4, 23), + MUX_GATE(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel", msdc30_1_parents, + 0x0070, 24, 3, 31), + MUX_GATE(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel", msdc30_2_parents, + 0x0080, 0, 3, 7), + MUX_GATE(CLK_TOP_MUX_AUDIO, "audio_sel", audio_parents, + 0x0080, 16, 2, 23), + MUX(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel", aud_intbus_parents, + 0x0080, 24, 2), + MUX(CLK_TOP_MUX_PMICSPI, "pmicspi_sel", pmicspi_parents, + 0x0090, 0, 3), + MUX(CLK_TOP_MUX_SCP, "scp_sel", scp_parents, + 0x0090, 8, 2), + MUX(CLK_TOP_MUX_ATB, "atb_sel", atb_parents, + 0x0090, 16, 2), + MUX_GATE(CLK_TOP_MUX_MJC, "mjc_sel", mjc_parents, 0x0090, 24, 2, 31), + MUX_GATE(CLK_TOP_MUX_DPI0, "dpi0_sel", dpi0_parents, 0x00A0, 0, 3, 7), + MUX_GATE(CLK_TOP_MUX_AUD_1, "aud_1_sel", aud_1_parents, + 0x00A0, 16, 1, 23), + MUX_GATE(CLK_TOP_MUX_AUD_2, "aud_2_sel", aud_2_parents, + 0x00A0, 24, 1, 31), + MUX(CLK_TOP_MUX_SSUSB_TOP_SYS, "ssusb_top_sys_sel", + ssusb_top_sys_parents, 0x00B0, 8, 1), + MUX(CLK_TOP_MUX_SPM, "spm_sel", spm_parents, + 0x00C0, 0, 1), + MUX(CLK_TOP_MUX_BSI_SPI, "bsi_spi_sel", bsi_spi_parents, + 0x00C0, 8, 2), + MUX_GATE(CLK_TOP_MUX_AUDIO_H, "audio_h_sel", audio_h_parents, + 0x00C0, 16, 2, 23), + MUX_GATE(CLK_TOP_MUX_ANC_MD32, "anc_md32_sel", anc_md32_parents, + 0x00C0, 24, 2, 31), + MUX(CLK_TOP_MUX_MFG_52M, "mfg_52m_sel", mfg_52m_parents, + 0x0104, 1, 2), +}; + +static int mtk_topckgen_init(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + void __iomem *base; + struct device_node *node = pdev->dev.of_node; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + base = devm_ioremap_resource(&pdev->dev, res); + if 
(IS_ERR(base)) + return PTR_ERR(base); + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + + mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs), + clk_data); + + mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, + &mt6797_clk_lock, clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct mtk_gate_regs infra0_cg_regs = { + .set_ofs = 0x0080, + .clr_ofs = 0x0084, + .sta_ofs = 0x0090, +}; + +static const struct mtk_gate_regs infra1_cg_regs = { + .set_ofs = 0x0088, + .clr_ofs = 0x008c, + .sta_ofs = 0x0094, +}; + +static const struct mtk_gate_regs infra2_cg_regs = { + .set_ofs = 0x00a8, + .clr_ofs = 0x00ac, + .sta_ofs = 0x00b0, +}; + +#define GATE_ICG0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &infra0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ +} + +#define GATE_ICG1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &infra1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ +} + +#define GATE_ICG2(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &infra2_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ +} + +static const struct mtk_gate infra_clks[] = { + GATE_ICG0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "ulposc", 0), + GATE_ICG0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pmicspi_sel", 1), + GATE_ICG0(CLK_INFRA_PMIC_MD, "infra_pmic_md", "pmicspi_sel", 2), + GATE_ICG0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn", "pmicspi_sel", 3), + GATE_ICG0(CLK_INFRA_SCP, "infra_scp", "scp_sel", 4), + GATE_ICG0(CLK_INFRA_SEJ, "infra_sej", "axi_sel", 5), + GATE_ICG0(CLK_INFRA_APXGPT, "infra_apxgpt", "axi_sel", 6), + GATE_ICG0(CLK_INFRA_SEJ_13M, "infra_sej_13m", "clk26m", 7), + GATE_ICG0(CLK_INFRA_ICUSB, "infra_icusb", "usb20_sel", 8), + GATE_ICG0(CLK_INFRA_GCE, "infra_gce", "axi_sel", 9), + GATE_ICG0(CLK_INFRA_THERM, "infra_therm", "axi_sel", 10), + GATE_ICG0(CLK_INFRA_I2C0, "infra_i2c0", "axi_sel", 11), + GATE_ICG0(CLK_INFRA_I2C1, "infra_i2c1", "axi_sel", 12), + GATE_ICG0(CLK_INFRA_I2C2, "infra_i2c2", "axi_sel", 13), + GATE_ICG0(CLK_INFRA_I2C3, "infra_i2c3", "axi_sel", 14), + GATE_ICG0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk", "axi_sel", 15), + GATE_ICG0(CLK_INFRA_PWM1, "infra_pwm1", "axi_sel", 16), + GATE_ICG0(CLK_INFRA_PWM2, "infra_pwm2", "axi_sel", 17), + GATE_ICG0(CLK_INFRA_PWM3, "infra_pwm3", "axi_sel", 18), + GATE_ICG0(CLK_INFRA_PWM4, "infra_pwm4", "axi_sel", 19), + GATE_ICG0(CLK_INFRA_PWM, "infra_pwm", "axi_sel", 21), + GATE_ICG0(CLK_INFRA_UART0, "infra_uart0", "uart_sel", 22), + GATE_ICG0(CLK_INFRA_UART1, "infra_uart1", "uart_sel", 23), + GATE_ICG0(CLK_INFRA_UART2, "infra_uart2", "uart_sel", 24), + GATE_ICG0(CLK_INFRA_UART3, "infra_uart3", "uart_sel", 25), + GATE_ICG0(CLK_INFRA_MD2MD_CCIF_0, "infra_md2md_ccif_0", "axi_sel", 27), + GATE_ICG0(CLK_INFRA_MD2MD_CCIF_1, "infra_md2md_ccif_1", "axi_sel", 28), + GATE_ICG0(CLK_INFRA_MD2MD_CCIF_2, "infra_md2md_ccif_2", "axi_sel", 29), + GATE_ICG0(CLK_INFRA_FHCTL, "infra_fhctl", "clk26m", 30), + GATE_ICG0(CLK_INFRA_BTIF, "infra_btif", "axi_sel", 31), + GATE_ICG1(CLK_INFRA_MD2MD_CCIF_3, "infra_md2md_ccif_3", "axi_sel", 0), + GATE_ICG1(CLK_INFRA_SPI, "infra_spi", "spi_sel", 1), + GATE_ICG1(CLK_INFRA_MSDC0, "infra_msdc0", "msdc50_0_sel", 2), + GATE_ICG1(CLK_INFRA_MD2MD_CCIF_4, "infra_md2md_ccif_4", "axi_sel", 3), + GATE_ICG1(CLK_INFRA_MSDC1, "infra_msdc1", 
"msdc30_1_sel", 4), + GATE_ICG1(CLK_INFRA_MSDC2, "infra_msdc2", "msdc30_2_sel", 5), + GATE_ICG1(CLK_INFRA_MD2MD_CCIF_5, "infra_md2md_ccif_5", "axi_sel", 7), + GATE_ICG1(CLK_INFRA_GCPU, "infra_gcpu", "axi_sel", 8), + GATE_ICG1(CLK_INFRA_TRNG, "infra_trng", "axi_sel", 9), + GATE_ICG1(CLK_INFRA_AUXADC, "infra_auxadc", "clk26m", 10), + GATE_ICG1(CLK_INFRA_CPUM, "infra_cpum", "axi_sel", 11), + GATE_ICG1(CLK_INFRA_AP_C2K_CCIF_0, "infra_ap_c2k_ccif_0", + "axi_sel", 12), + GATE_ICG1(CLK_INFRA_AP_C2K_CCIF_1, "infra_ap_c2k_ccif_1", + "axi_sel", 13), + GATE_ICG1(CLK_INFRA_CLDMA, "infra_cldma", "axi_sel", 16), + GATE_ICG1(CLK_INFRA_DISP_PWM, "infra_disp_pwm", "pwm_sel", 17), + GATE_ICG1(CLK_INFRA_AP_DMA, "infra_ap_dma", "axi_sel", 18), + GATE_ICG1(CLK_INFRA_DEVICE_APC, "infra_device_apc", "axi_sel", 20), + GATE_ICG1(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "mm_sel", 22), + GATE_ICG1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23), + GATE_ICG1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25), + GATE_ICG1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26), + GATE_ICG1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "clk26m", 31), + GATE_ICG2(CLK_INFRA_I2C4, "infra_i2c4", "axi_sel", 0), + GATE_ICG2(CLK_INFRA_I2C_APPM, "infra_i2c_appm", "axi_sel", 1), + GATE_ICG2(CLK_INFRA_I2C_GPUPM, "infra_i2c_gpupm", "axi_sel", 2), + GATE_ICG2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm", "axi_sel", 3), + GATE_ICG2(CLK_INFRA_I2C2_ARB, "infra_i2c2_arb", "axi_sel", 4), + GATE_ICG2(CLK_INFRA_I2C3_IMM, "infra_i2c3_imm", "axi_sel", 5), + GATE_ICG2(CLK_INFRA_I2C3_ARB, "infra_i2c3_arb", "axi_sel", 6), + GATE_ICG2(CLK_INFRA_I2C5, "infra_i2c5", "axi_sel", 7), + GATE_ICG2(CLK_INFRA_SYS_CIRQ, "infra_sys_cirq", "axi_sel", 8), + GATE_ICG2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 10), + GATE_ICG2(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m", "clk26m", 11), + GATE_ICG2(CLK_INFRA_ANC_MD32, "infra_anc_md32", "anc_md32_sel", 12), + GATE_ICG2(CLK_INFRA_ANC_MD32_32K, "infra_anc_md32_32k", "clk26m", 13), + GATE_ICG2(CLK_INFRA_DVFS_SPM1, "infra_dvfs_spm1", "axi_sel", 15), + GATE_ICG2(CLK_INFRA_AES_TOP0, "infra_aes_top0", "axi_sel", 16), + GATE_ICG2(CLK_INFRA_AES_TOP1, "infra_aes_top1", "axi_sel", 17), + GATE_ICG2(CLK_INFRA_SSUSB_BUS, "infra_ssusb_bus", "axi_sel", 18), + GATE_ICG2(CLK_INFRA_SPI2, "infra_spi2", "spi_sel", 19), + GATE_ICG2(CLK_INFRA_SPI3, "infra_spi3", "spi_sel", 20), + GATE_ICG2(CLK_INFRA_SPI4, "infra_spi4", "spi_sel", 21), + GATE_ICG2(CLK_INFRA_SPI5, "infra_spi5", "spi_sel", 22), + GATE_ICG2(CLK_INFRA_IRTX, "infra_irtx", "spi_sel", 23), + GATE_ICG2(CLK_INFRA_SSUSB_SYS, "infra_ssusb_sys", + "ssusb_top_sys_sel", 24), + GATE_ICG2(CLK_INFRA_SSUSB_REF, "infra_ssusb_ref", "clk26m", 9), + GATE_ICG2(CLK_INFRA_AUDIO_26M, "infra_audio_26m", "clk26m", 26), + GATE_ICG2(CLK_INFRA_AUDIO_26M_PAD_TOP, "infra_audio_26m_pad_top", + "clk26m", 27), + GATE_ICG2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_modem_temp_share", + "axi_sel", 28), + GATE_ICG2(CLK_INFRA_VAD_WRAP_SOC, "infra_vad_wrap_soc", "axi_sel", 29), + GATE_ICG2(CLK_INFRA_DRAMC_CONF, "infra_dramc_conf", "axi_sel", 30), + GATE_ICG2(CLK_INFRA_DRAMC_B_CONF, "infra_dramc_b_conf", "axi_sel", 31), + GATE_ICG1(CLK_INFRA_MFG_VCG, "infra_mfg_vcg", "mfg_52m_sel", 14), +}; + +static const struct mtk_fixed_factor infra_fixed_divs[] = { + FACTOR(CLK_INFRA_13M, "clk13m", "clk26m", 1, 2), +}; + +static struct clk_onecell_data *infra_clk_data; + +static void mtk_infrasys_init_early(struct device_node *node) +{ + int r, i; + + if (!infra_clk_data) { + infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + + for (i = 0; i < 
CLK_INFRA_NR; i++) + infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + } + + mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), + infra_clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} + +CLK_OF_DECLARE_DRIVER(mtk_infra, "mediatek,mt6797-infracfg", + mtk_infrasys_init_early); + +static int mtk_infrasys_init(struct platform_device *pdev) +{ + int r, i; + struct device_node *node = pdev->dev.of_node; + + if (!infra_clk_data) { + infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + } else { + for (i = 0; i < CLK_INFRA_NR; i++) { + if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) + infra_clk_data->clks[i] = ERR_PTR(-ENOENT); + } + } + + mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), + infra_clk_data); + mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), + infra_clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + if (r) + return r; + + return 0; +} + +#define MT6797_PLL_FMAX (3000UL * MHZ) + +#define CON0_MT6797_RST_BAR BIT(24) + +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift, _div_table) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = CON0_MT6797_RST_BAR, \ + .fmax = MT6797_PLL_FMAX, \ + .pcwbits = _pcwbits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + .div_table = _div_table, \ +} + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift) \ + PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \ + NULL) + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0xF0000101, PLL_AO, + 21, 0x220, 4, 0x0, 0x224, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0230, 0x023C, 0xFE000011, 0, 7, + 0x230, 4, 0x0, 0x234, 14), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000101, 0, 21, + 0x244, 24, 0x0, 0x244, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000121, 0, 21, + 0x250, 4, 0x0, 0x254, 0), + PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0260, 0x026C, 0x00000121, 0, 21, + 0x260, 4, 0x0, 0x264, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0xC0000121, 0, 21, + 0x270, 4, 0x0, 0x274, 0), + PLL(CLK_APMIXED_CODECPLL, "codecpll", 0x0290, 0x029C, 0x00000121, 0, 21, + 0x290, 4, 0x0, 0x294, 0), + PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x02E4, 0x02F0, 0x00000121, 0, 21, + 0x2E4, 4, 0x0, 0x2E8, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000131, 0, 31, + 0x2A0, 4, 0x2A8, 0x2A4, 0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x02B4, 0x02C4, 0x00000131, 0, 31, + 0x2B4, 4, 0x2BC, 0x2B8, 0), +}; + +static int mtk_apmixedsys_init(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR); + if (!clk_data) + return -ENOMEM; + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt6797[] = { + { + .compatible = "mediatek,mt6797-topckgen", + .data = mtk_topckgen_init, + }, { + 
.compatible = "mediatek,mt6797-infracfg", + .data = mtk_infrasys_init, + }, { + .compatible = "mediatek,mt6797-apmixedsys", + .data = mtk_apmixedsys_init, + }, { + /* sentinel */ + } +}; + +static int clk_mt6797_probe(struct platform_device *pdev) +{ + int (*clk_init)(struct platform_device *); + int r; + + clk_init = of_device_get_match_data(&pdev->dev); + if (!clk_init) + return -EINVAL; + + r = clk_init(pdev); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6797_drv = { + .probe = clk_mt6797_probe, + .driver = { + .name = "clk-mt6797", + .of_match_table = of_match_clk_mt6797, + }, +}; + +static int __init clk_mt6797_init(void) +{ + return platform_driver_register(&clk_mt6797_drv); +} + +arch_initcall(clk_mt6797_init); diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile index 349583405b7c..83b6d9d65aa1 100644 --- a/drivers/clk/meson/Makefile +++ b/drivers/clk/meson/Makefile @@ -2,6 +2,6 @@ # Makefile for Meson specific clk # -obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o +obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c new file mode 100644 index 000000000000..6c07db06642d --- /dev/null +++ b/drivers/clk/meson/clk-audio-divider.c @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2017 AmLogic, Inc. + * Author: Jerome Brunet <jbrunet@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* + * i2s master clock divider: The algorithm of the generic clk-divider used with + * a very precise clock parent such as the mpll tends to select a low divider + * factor. This gives poor results with this particular divider, especially with + * high frequencies (> 100 MHz) + * + * This driver try to select the maximum possible divider with the rate the + * upstream clock can provide. 
+ */ + +#include <linux/clk-provider.h> +#include "clkc.h" + +#define to_meson_clk_audio_divider(_hw) container_of(_hw, \ + struct meson_clk_audio_divider, hw) + +static int _div_round(unsigned long parent_rate, unsigned long rate, + unsigned long flags) +{ + if (flags & CLK_DIVIDER_ROUND_CLOSEST) + return DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate); + + return DIV_ROUND_UP_ULL((u64)parent_rate, rate); +} + +static int _get_val(unsigned long parent_rate, unsigned long rate) +{ + return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1; +} + +static int _valid_divider(struct clk_hw *hw, int divider) +{ + struct meson_clk_audio_divider *adiv = + to_meson_clk_audio_divider(hw); + int max_divider; + u8 width; + + width = adiv->div.width; + max_divider = 1 << width; + + return clamp(divider, 1, max_divider); +} + +static unsigned long audio_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct meson_clk_audio_divider *adiv = + to_meson_clk_audio_divider(hw); + struct parm *p; + unsigned long reg, divider; + + p = &adiv->div; + reg = readl(adiv->base + p->reg_off); + divider = PARM_GET(p->width, p->shift, reg) + 1; + + return DIV_ROUND_UP_ULL((u64)parent_rate, divider); +} + +static long audio_divider_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + struct meson_clk_audio_divider *adiv = + to_meson_clk_audio_divider(hw); + unsigned long max_prate; + int divider; + + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { + divider = _div_round(*parent_rate, rate, adiv->flags); + divider = _valid_divider(hw, divider); + return DIV_ROUND_UP_ULL((u64)*parent_rate, divider); + } + + /* Get the maximum parent rate */ + max_prate = clk_hw_round_rate(clk_hw_get_parent(hw), ULONG_MAX); + + /* Get the corresponding rounded down divider */ + divider = max_prate / rate; + divider = _valid_divider(hw, divider); + + /* Get actual rate of the parent */ + *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), + divider * rate); + + return DIV_ROUND_UP_ULL((u64)*parent_rate, divider); +} + +static int audio_divider_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct meson_clk_audio_divider *adiv = + to_meson_clk_audio_divider(hw); + struct parm *p; + unsigned long reg, flags = 0; + int val; + + val = _get_val(parent_rate, rate); + + if (adiv->lock) + spin_lock_irqsave(adiv->lock, flags); + else + __acquire(adiv->lock); + + p = &adiv->div; + reg = readl(adiv->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, val); + writel(reg, adiv->base + p->reg_off); + + if (adiv->lock) + spin_unlock_irqrestore(adiv->lock, flags); + else + __release(adiv->lock); + + return 0; +} + +const struct clk_ops meson_clk_audio_divider_ro_ops = { + .recalc_rate = audio_divider_recalc_rate, + .round_rate = audio_divider_round_rate, +}; + +const struct clk_ops meson_clk_audio_divider_ops = { + .recalc_rate = audio_divider_recalc_rate, + .round_rate = audio_divider_round_rate, + .set_rate = audio_divider_set_rate, +}; diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 03af79005ddb..39eab69fe51a 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -64,17 +64,51 @@ #include <linux/clk-provider.h> #include "clkc.h" -#define SDM_MAX 16384 +#define SDM_DEN 16384 +#define N2_MIN 4 +#define N2_MAX 511 #define to_meson_clk_mpll(_hw) container_of(_hw, struct meson_clk_mpll, hw) +static long rate_from_params(unsigned long parent_rate, + unsigned long sdm, + unsigned long n2) +{ + 
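+	/*
+	 * The MPLL behaves as a fractional divider of its parent:
+	 *   rate = parent_rate * SDM_DEN / (SDM_DEN * n2 + sdm)
+	 *        = parent_rate / (n2 + sdm / SDM_DEN)
+	 * e.g. n2 = 8, sdm = 0 gives parent_rate / 8, while n2 = 8,
+	 * sdm = SDM_DEN / 2 gives parent_rate / 8.5.
+	 */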
unsigned long divisor = (SDM_DEN * n2) + sdm; + + if (n2 < N2_MIN) + return -EINVAL; + + return DIV_ROUND_UP_ULL((u64)parent_rate * SDM_DEN, divisor); +} + +static void params_from_rate(unsigned long requested_rate, + unsigned long parent_rate, + unsigned long *sdm, + unsigned long *n2) +{ + uint64_t div = parent_rate; + unsigned long rem = do_div(div, requested_rate); + + if (div < N2_MIN) { + *n2 = N2_MIN; + *sdm = 0; + } else if (div > N2_MAX) { + *n2 = N2_MAX; + *sdm = SDM_DEN - 1; + } else { + *n2 = div; + *sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate); + } +} + static unsigned long mpll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw); struct parm *p; - unsigned long rate = 0; unsigned long reg, sdm, n2; + long rate; p = &mpll->sdm; reg = readl(mpll->base + p->reg_off); @@ -84,11 +118,123 @@ static unsigned long mpll_recalc_rate(struct clk_hw *hw, reg = readl(mpll->base + p->reg_off); n2 = PARM_GET(p->width, p->shift, reg); - rate = (parent_rate * SDM_MAX) / ((SDM_MAX * n2) + sdm); + rate = rate_from_params(parent_rate, sdm, n2); + if (rate < 0) + return 0; return rate; } +static long mpll_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long sdm, n2; + + params_from_rate(rate, *parent_rate, &sdm, &n2); + return rate_from_params(*parent_rate, sdm, n2); +} + +static int mpll_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw); + struct parm *p; + unsigned long reg, sdm, n2; + unsigned long flags = 0; + + params_from_rate(rate, parent_rate, &sdm, &n2); + + if (mpll->lock) + spin_lock_irqsave(mpll->lock, flags); + else + __acquire(mpll->lock); + + p = &mpll->sdm; + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, sdm); + writel(reg, mpll->base + p->reg_off); + + p = &mpll->sdm_en; + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, 1); + writel(reg, mpll->base + p->reg_off); + + p = &mpll->n2; + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, n2); + writel(reg, mpll->base + p->reg_off); + + if (mpll->lock) + spin_unlock_irqrestore(mpll->lock, flags); + else + __release(mpll->lock); + + return 0; +} + +static void mpll_enable_core(struct clk_hw *hw, int enable) +{ + struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw); + struct parm *p; + unsigned long reg; + unsigned long flags = 0; + + if (mpll->lock) + spin_lock_irqsave(mpll->lock, flags); + else + __acquire(mpll->lock); + + p = &mpll->en; + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, enable ? 
1 : 0); + writel(reg, mpll->base + p->reg_off); + + if (mpll->lock) + spin_unlock_irqrestore(mpll->lock, flags); + else + __release(mpll->lock); +} + + +static int mpll_enable(struct clk_hw *hw) +{ + mpll_enable_core(hw, 1); + + return 0; +} + +static void mpll_disable(struct clk_hw *hw) +{ + mpll_enable_core(hw, 0); +} + +static int mpll_is_enabled(struct clk_hw *hw) +{ + struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw); + struct parm *p; + unsigned long reg; + int en; + + p = &mpll->en; + reg = readl(mpll->base + p->reg_off); + en = PARM_GET(p->width, p->shift, reg); + + return en; +} + const struct clk_ops meson_clk_mpll_ro_ops = { - .recalc_rate = mpll_recalc_rate, + .recalc_rate = mpll_recalc_rate, + .round_rate = mpll_round_rate, + .is_enabled = mpll_is_enabled, +}; + +const struct clk_ops meson_clk_mpll_ops = { + .recalc_rate = mpll_recalc_rate, + .round_rate = mpll_round_rate, + .set_rate = mpll_set_rate, + .enable = mpll_enable, + .disable = mpll_disable, + .is_enabled = mpll_is_enabled, }; diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 4adc1e89212c..01341553f50b 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -116,6 +116,30 @@ static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_ return NULL; } +/* Specific wait loop for GXL/GXM GP0 PLL */ +static int meson_clk_pll_wait_lock_reset(struct meson_clk_pll *pll, + struct parm *p_n) +{ + int delay = 100; + u32 reg; + + while (delay > 0) { + reg = readl(pll->base + p_n->reg_off); + writel(reg | MESON_PLL_RESET, pll->base + p_n->reg_off); + udelay(10); + writel(reg & ~MESON_PLL_RESET, pll->base + p_n->reg_off); + + /* This delay comes from AMLogic tree clk-gp0-gxl driver */ + mdelay(1); + + reg = readl(pll->base + p_n->reg_off); + if (reg & MESON_PLL_LOCK) + return 0; + delay--; + } + return -ETIMEDOUT; +} + static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll, struct parm *p_n) { @@ -132,6 +156,15 @@ static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll, return -ETIMEDOUT; } +static void meson_clk_pll_init_params(struct meson_clk_pll *pll) +{ + int i; + + for (i = 0 ; i < pll->params.params_count ; ++i) + writel(pll->params.params_table[i].value, + pll->base + pll->params.params_table[i].reg_off); +} + static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -151,10 +184,16 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (!rate_set) return -EINVAL; + /* Initialize the PLL in a clean state if specified */ + if (pll->params.params_count) + meson_clk_pll_init_params(pll); + /* PLL reset */ p = &pll->n; reg = readl(pll->base + p->reg_off); - writel(reg | MESON_PLL_RESET, pll->base + p->reg_off); + /* If no_init_reset is provided, avoid resetting at this point */ + if (!pll->params.no_init_reset) + writel(reg | MESON_PLL_RESET, pll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, rate_set->n); writel(reg, pll->base + p->reg_off); @@ -184,7 +223,17 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, } p = &pll->n; - ret = meson_clk_pll_wait_lock(pll, p); + /* If clear_reset_for_lock is provided, remove the reset bit here */ + if (pll->params.clear_reset_for_lock) { + reg = readl(pll->base + p->reg_off); + writel(reg & ~MESON_PLL_RESET, pll->base + p->reg_off); + } + + /* If reset_lock_loop, use a special loop including resetting */ + if (pll->params.reset_lock_loop) + ret = meson_clk_pll_wait_lock_reset(pll, p); + else + ret = 
meson_clk_pll_wait_lock(pll, p); if (ret) { pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", __func__, old_rate); diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index 9bb70e7a7d6a..d6feafe8bd6c 100644 --- a/drivers/clk/meson/clkc.h +++ b/drivers/clk/meson/clkc.h @@ -25,7 +25,7 @@ #define PARM_GET(width, shift, reg) \ (((reg) & SETPMASK(width, shift)) >> (shift)) #define PARM_SET(width, shift, reg, val) \ - (((reg) & CLRPMASK(width, shift)) | (val << (shift))) + (((reg) & CLRPMASK(width, shift)) | ((val) << (shift))) #define MESON_PARM_APPLICABLE(p) (!!((p)->width)) @@ -62,6 +62,28 @@ struct pll_rate_table { .frac = (_frac), \ } \ +struct pll_params_table { + unsigned int reg_off; + unsigned int value; +}; + +#define PLL_PARAM(_reg, _val) \ + { \ + .reg_off = (_reg), \ + .value = (_val), \ + } + +struct pll_setup_params { + struct pll_params_table *params_table; + unsigned int params_count; + /* Workaround for GP0, do not reset before configuring */ + bool no_init_reset; + /* Workaround for GP0, unreset right before checking for lock */ + bool clear_reset_for_lock; + /* Workaround for GXL GP0, reset in the lock checking loop */ + bool reset_lock_loop; +}; + struct meson_clk_pll { struct clk_hw hw; void __iomem *base; @@ -70,6 +92,7 @@ struct meson_clk_pll { struct parm frac; struct parm od; struct parm od2; + const struct pll_setup_params params; const struct pll_rate_table *rate_table; unsigned int rate_count; spinlock_t *lock; @@ -92,8 +115,17 @@ struct meson_clk_mpll { struct clk_hw hw; void __iomem *base; struct parm sdm; + struct parm sdm_en; struct parm n2; - /* FIXME ssen gate control? */ + struct parm en; + spinlock_t *lock; +}; + +struct meson_clk_audio_divider { + struct clk_hw hw; + void __iomem *base; + struct parm div; + u8 flags; spinlock_t *lock; }; @@ -116,5 +148,8 @@ extern const struct clk_ops meson_clk_pll_ro_ops; extern const struct clk_ops meson_clk_pll_ops; extern const struct clk_ops meson_clk_cpu_ops; extern const struct clk_ops meson_clk_mpll_ro_ops; +extern const struct clk_ops meson_clk_mpll_ops; +extern const struct clk_ops meson_clk_audio_divider_ro_ops; +extern const struct clk_ops meson_clk_audio_divider_ops; #endif /* __CLKC_H */ diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 1c1ec137a3cc..ad5f027af1a2 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -20,6 +20,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/init.h> @@ -120,7 +121,7 @@ static const struct pll_rate_table sys_pll_rate_table[] = { { /* sentinel */ }, }; -static const struct pll_rate_table gp0_pll_rate_table[] = { +static const struct pll_rate_table gxbb_gp0_pll_rate_table[] = { PLL_RATE(96000000, 32, 1, 3), PLL_RATE(99000000, 33, 1, 3), PLL_RATE(102000000, 34, 1, 3), @@ -248,6 +249,35 @@ static const struct pll_rate_table gp0_pll_rate_table[] = { { /* sentinel */ }, }; +static const struct pll_rate_table gxl_gp0_pll_rate_table[] = { + PLL_RATE(504000000, 42, 1, 1), + PLL_RATE(516000000, 43, 1, 1), + PLL_RATE(528000000, 44, 1, 1), + PLL_RATE(540000000, 45, 1, 1), + PLL_RATE(552000000, 46, 1, 1), + PLL_RATE(564000000, 47, 1, 1), + PLL_RATE(576000000, 48, 1, 1), + PLL_RATE(588000000, 49, 1, 1), + PLL_RATE(600000000, 50, 1, 1), + PLL_RATE(612000000, 51, 1, 1), + PLL_RATE(624000000, 52, 1, 1), + PLL_RATE(636000000, 53, 1, 1), + PLL_RATE(648000000, 54, 1, 1), + PLL_RATE(660000000, 55, 1, 1), + 
PLL_RATE(672000000, 56, 1, 1), + PLL_RATE(684000000, 57, 1, 1), + PLL_RATE(696000000, 58, 1, 1), + PLL_RATE(708000000, 59, 1, 1), + PLL_RATE(720000000, 60, 1, 1), + PLL_RATE(732000000, 61, 1, 1), + PLL_RATE(744000000, 62, 1, 1), + PLL_RATE(756000000, 63, 1, 1), + PLL_RATE(768000000, 64, 1, 1), + PLL_RATE(780000000, 65, 1, 1), + PLL_RATE(792000000, 66, 1, 1), + { /* sentinel */ }, +}; + static const struct clk_div_table cpu_div_table[] = { { .val = 1, .div = 1 }, { .val = 2, .div = 2 }, @@ -352,6 +382,13 @@ static struct meson_clk_pll gxbb_sys_pll = { }, }; +struct pll_params_table gxbb_gp0_params_table[] = { + PLL_PARAM(HHI_GP0_PLL_CNTL, 0x6a000228), + PLL_PARAM(HHI_GP0_PLL_CNTL2, 0x69c80000), + PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a5590c4), + PLL_PARAM(HHI_GP0_PLL_CNTL4, 0x0000500d), +}; + static struct meson_clk_pll gxbb_gp0_pll = { .m = { .reg_off = HHI_GP0_PLL_CNTL, @@ -368,8 +405,57 @@ static struct meson_clk_pll gxbb_gp0_pll = { .shift = 16, .width = 2, }, - .rate_table = gp0_pll_rate_table, - .rate_count = ARRAY_SIZE(gp0_pll_rate_table), + .params = { + .params_table = gxbb_gp0_params_table, + .params_count = ARRAY_SIZE(gxbb_gp0_params_table), + .no_init_reset = true, + .clear_reset_for_lock = true, + }, + .rate_table = gxbb_gp0_pll_rate_table, + .rate_count = ARRAY_SIZE(gxbb_gp0_pll_rate_table), + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "gp0_pll", + .ops = &meson_clk_pll_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +struct pll_params_table gxl_gp0_params_table[] = { + PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250), + PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000), + PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be), + PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288), + PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d), + PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000), +}; + +static struct meson_clk_pll gxl_gp0_pll = { + .m = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 16, + .width = 2, + }, + .params = { + .params_table = gxl_gp0_params_table, + .params_count = ARRAY_SIZE(gxl_gp0_params_table), + .no_init_reset = true, + .reset_lock_loop = true, + }, + .rate_table = gxl_gp0_pll_rate_table, + .rate_count = ARRAY_SIZE(gxl_gp0_pll_rate_table), .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "gp0_pll", @@ -441,15 +527,25 @@ static struct meson_clk_mpll gxbb_mpll0 = { .shift = 0, .width = 14, }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 15, + .width = 1, + }, .n2 = { .reg_off = HHI_MPLL_CNTL7, .shift = 16, .width = 9, }, + .en = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 14, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll0", - .ops = &meson_clk_mpll_ro_ops, + .ops = &meson_clk_mpll_ops, .parent_names = (const char *[]){ "fixed_pll" }, .num_parents = 1, }, @@ -461,15 +557,25 @@ static struct meson_clk_mpll gxbb_mpll1 = { .shift = 0, .width = 14, }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 15, + .width = 1, + }, .n2 = { .reg_off = HHI_MPLL_CNTL8, .shift = 16, .width = 9, }, + .en = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 14, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll1", - .ops = &meson_clk_mpll_ro_ops, + .ops = &meson_clk_mpll_ops, .parent_names = (const char *[]){ "fixed_pll" }, .num_parents = 1, }, @@ -481,15 +587,25 @@ static struct meson_clk_mpll gxbb_mpll2 = { .shift = 
0, .width = 14, }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 15, + .width = 1, + }, .n2 = { .reg_off = HHI_MPLL_CNTL9, .shift = 16, .width = 9, }, + .en = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 14, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll2", - .ops = &meson_clk_mpll_ro_ops, + .ops = &meson_clk_mpll_ops, .parent_names = (const char *[]){ "fixed_pll" }, .num_parents = 1, }, @@ -604,6 +720,237 @@ static struct clk_gate gxbb_sar_adc_clk = { }, }; +/* + * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) + * muxed by a glitch-free switch. + */ + +static u32 mux_table_mali_0_1[] = {0, 1, 2, 3, 4, 5, 6, 7}; +static const char *gxbb_mali_0_1_parent_names[] = { + "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7", + "fclk_div4", "fclk_div3", "fclk_div5" +}; + +static struct clk_mux gxbb_mali_0_sel = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .mask = 0x7, + .shift = 9, + .table = mux_table_mali_0_1, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_0_sel", + .ops = &clk_mux_ops, + /* + * bits 10:9 selects from 8 possible parents: + * xtal, gp0_pll, mpll2, mpll1, fclk_div7, + * fclk_div4, fclk_div3, fclk_div5 + */ + .parent_names = gxbb_mali_0_1_parent_names, + .num_parents = 8, + .flags = CLK_SET_RATE_NO_REPARENT, + }, +}; + +static struct clk_divider gxbb_mali_0_div = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .shift = 0, + .width = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_0_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "mali_0_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_NO_REPARENT, + }, +}; + +static struct clk_gate gxbb_mali_0 = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .bit_idx = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_0", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "mali_0_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_mux gxbb_mali_1_sel = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .mask = 0x7, + .shift = 25, + .table = mux_table_mali_0_1, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_1_sel", + .ops = &clk_mux_ops, + /* + * bits 10:9 selects from 8 possible parents: + * xtal, gp0_pll, mpll2, mpll1, fclk_div7, + * fclk_div4, fclk_div3, fclk_div5 + */ + .parent_names = gxbb_mali_0_1_parent_names, + .num_parents = 8, + .flags = CLK_SET_RATE_NO_REPARENT, + }, +}; + +static struct clk_divider gxbb_mali_1_div = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .shift = 16, + .width = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_1_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "mali_1_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_NO_REPARENT, + }, +}; + +static struct clk_gate gxbb_mali_1 = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .bit_idx = 24, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali_1", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "mali_1_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static u32 mux_table_mali[] = {0, 1}; +static const char *gxbb_mali_parent_names[] = { + "mali_0", "mali_1" +}; + +static struct clk_mux gxbb_mali = { + .reg = (void *)HHI_MALI_CLK_CNTL, + .mask = 1, + .shift = 31, + .table = mux_table_mali, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mali", + .ops = &clk_mux_ops, + .parent_names = gxbb_mali_parent_names, + .num_parents = 2, + .flags = 
CLK_SET_RATE_NO_REPARENT, + }, +}; + +static struct clk_mux gxbb_cts_amclk_sel = { + .reg = (void *) HHI_AUD_CLK_CNTL, + .mask = 0x3, + .shift = 9, + /* Default parent unknown (register reset value: 0) */ + .table = (u32[]){ 1, 2, 3 }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk_sel", + .ops = &clk_mux_ops, + .parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" }, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct meson_clk_audio_divider gxbb_cts_amclk_div = { + .div = { + .reg_off = HHI_AUD_CLK_CNTL, + .shift = 0, + .width = 8, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk_div", + .ops = &meson_clk_audio_divider_ops, + .parent_names = (const char *[]){ "cts_amclk_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + }, +}; + +static struct clk_gate gxbb_cts_amclk = { + .reg = (void *) HHI_AUD_CLK_CNTL, + .bit_idx = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "cts_amclk_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_mux gxbb_cts_mclk_i958_sel = { + .reg = (void *)HHI_AUD_CLK_CNTL2, + .mask = 0x3, + .shift = 25, + /* Default parent unknown (register reset value: 0) */ + .table = (u32[]){ 1, 2, 3 }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_mclk_i958_sel", + .ops = &clk_mux_ops, + .parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" }, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_divider gxbb_cts_mclk_i958_div = { + .reg = (void *)HHI_AUD_CLK_CNTL2, + .shift = 16, + .width = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_mclk_i958_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "cts_mclk_i958_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + }, +}; + +static struct clk_gate gxbb_cts_mclk_i958 = { + .reg = (void *)HHI_AUD_CLK_CNTL2, + .bit_idx = 24, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_mclk_i958", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "cts_mclk_i958_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_mux gxbb_cts_i958 = { + .reg = (void *)HHI_AUD_CLK_CNTL2, + .mask = 0x1, + .shift = 27, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "cts_i958", + .ops = &clk_mux_ops, + .parent_names = (const char *[]){ "cts_amclk", "cts_mclk_i958" }, + .num_parents = 2, + /* + *The parent is specific to origin of the audio data. 
Let the + * consumer choose the appropriate parent + */ + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1); @@ -797,6 +1144,140 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_SAR_ADC_CLK] = &gxbb_sar_adc_clk.hw, [CLKID_SAR_ADC_SEL] = &gxbb_sar_adc_clk_sel.hw, [CLKID_SAR_ADC_DIV] = &gxbb_sar_adc_clk_div.hw, + [CLKID_MALI_0_SEL] = &gxbb_mali_0_sel.hw, + [CLKID_MALI_0_DIV] = &gxbb_mali_0_div.hw, + [CLKID_MALI_0] = &gxbb_mali_0.hw, + [CLKID_MALI_1_SEL] = &gxbb_mali_1_sel.hw, + [CLKID_MALI_1_DIV] = &gxbb_mali_1_div.hw, + [CLKID_MALI_1] = &gxbb_mali_1.hw, + [CLKID_MALI] = &gxbb_mali.hw, + [CLKID_CTS_AMCLK] = &gxbb_cts_amclk.hw, + [CLKID_CTS_AMCLK_SEL] = &gxbb_cts_amclk_sel.hw, + [CLKID_CTS_AMCLK_DIV] = &gxbb_cts_amclk_div.hw, + [CLKID_CTS_MCLK_I958] = &gxbb_cts_mclk_i958.hw, + [CLKID_CTS_MCLK_I958_SEL] = &gxbb_cts_mclk_i958_sel.hw, + [CLKID_CTS_MCLK_I958_DIV] = &gxbb_cts_mclk_i958_div.hw, + [CLKID_CTS_I958] = &gxbb_cts_i958.hw, + }, + .num = NR_CLKS, +}; + +static struct clk_hw_onecell_data gxl_hw_onecell_data = { + .hws = { + [CLKID_SYS_PLL] = &gxbb_sys_pll.hw, + [CLKID_CPUCLK] = &gxbb_cpu_clk.hw, + [CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw, + [CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw, + [CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &gxbb_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &gxbb_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw, + [CLKID_GP0_PLL] = &gxl_gp0_pll.hw, + [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw, + [CLKID_CLK81] = &gxbb_clk81.hw, + [CLKID_MPLL0] = &gxbb_mpll0.hw, + [CLKID_MPLL1] = &gxbb_mpll1.hw, + [CLKID_MPLL2] = &gxbb_mpll2.hw, + [CLKID_DDR] = &gxbb_ddr.hw, + [CLKID_DOS] = &gxbb_dos.hw, + [CLKID_ISA] = &gxbb_isa.hw, + [CLKID_PL301] = &gxbb_pl301.hw, + [CLKID_PERIPHS] = &gxbb_periphs.hw, + [CLKID_SPICC] = &gxbb_spicc.hw, + [CLKID_I2C] = &gxbb_i2c.hw, + [CLKID_SAR_ADC] = &gxbb_sar_adc.hw, + [CLKID_SMART_CARD] = &gxbb_smart_card.hw, + [CLKID_RNG0] = &gxbb_rng0.hw, + [CLKID_UART0] = &gxbb_uart0.hw, + [CLKID_SDHC] = &gxbb_sdhc.hw, + [CLKID_STREAM] = &gxbb_stream.hw, + [CLKID_ASYNC_FIFO] = &gxbb_async_fifo.hw, + [CLKID_SDIO] = &gxbb_sdio.hw, + [CLKID_ABUF] = &gxbb_abuf.hw, + [CLKID_HIU_IFACE] = &gxbb_hiu_iface.hw, + [CLKID_ASSIST_MISC] = &gxbb_assist_misc.hw, + [CLKID_SPI] = &gxbb_spi.hw, + [CLKID_I2S_SPDIF] = &gxbb_i2s_spdif.hw, + [CLKID_ETH] = &gxbb_eth.hw, + [CLKID_DEMUX] = &gxbb_demux.hw, + [CLKID_AIU_GLUE] = &gxbb_aiu_glue.hw, + [CLKID_IEC958] = &gxbb_iec958.hw, + [CLKID_I2S_OUT] = &gxbb_i2s_out.hw, + [CLKID_AMCLK] = &gxbb_amclk.hw, + [CLKID_AIFIFO2] = &gxbb_aififo2.hw, + [CLKID_MIXER] = &gxbb_mixer.hw, + [CLKID_MIXER_IFACE] = &gxbb_mixer_iface.hw, + [CLKID_ADC] = &gxbb_adc.hw, + [CLKID_BLKMV] = &gxbb_blkmv.hw, + [CLKID_AIU] = &gxbb_aiu.hw, + [CLKID_UART1] = &gxbb_uart1.hw, + [CLKID_G2D] = &gxbb_g2d.hw, + [CLKID_USB0] = &gxbb_usb0.hw, + [CLKID_USB1] = &gxbb_usb1.hw, + [CLKID_RESET] = &gxbb_reset.hw, + [CLKID_NAND] = &gxbb_nand.hw, + [CLKID_DOS_PARSER] = &gxbb_dos_parser.hw, + [CLKID_USB] = &gxbb_usb.hw, + [CLKID_VDIN1] = &gxbb_vdin1.hw, + [CLKID_AHB_ARB0] = &gxbb_ahb_arb0.hw, + [CLKID_EFUSE] = &gxbb_efuse.hw, + [CLKID_BOOT_ROM] = &gxbb_boot_rom.hw, + [CLKID_AHB_DATA_BUS] = &gxbb_ahb_data_bus.hw, + [CLKID_AHB_CTRL_BUS] = &gxbb_ahb_ctrl_bus.hw, + [CLKID_HDMI_INTR_SYNC] = &gxbb_hdmi_intr_sync.hw, + 
[CLKID_HDMI_PCLK] = &gxbb_hdmi_pclk.hw, + [CLKID_USB1_DDR_BRIDGE] = &gxbb_usb1_ddr_bridge.hw, + [CLKID_USB0_DDR_BRIDGE] = &gxbb_usb0_ddr_bridge.hw, + [CLKID_MMC_PCLK] = &gxbb_mmc_pclk.hw, + [CLKID_DVIN] = &gxbb_dvin.hw, + [CLKID_UART2] = &gxbb_uart2.hw, + [CLKID_SANA] = &gxbb_sana.hw, + [CLKID_VPU_INTR] = &gxbb_vpu_intr.hw, + [CLKID_SEC_AHB_AHB3_BRIDGE] = &gxbb_sec_ahb_ahb3_bridge.hw, + [CLKID_CLK81_A53] = &gxbb_clk81_a53.hw, + [CLKID_VCLK2_VENCI0] = &gxbb_vclk2_venci0.hw, + [CLKID_VCLK2_VENCI1] = &gxbb_vclk2_venci1.hw, + [CLKID_VCLK2_VENCP0] = &gxbb_vclk2_vencp0.hw, + [CLKID_VCLK2_VENCP1] = &gxbb_vclk2_vencp1.hw, + [CLKID_GCLK_VENCI_INT0] = &gxbb_gclk_venci_int0.hw, + [CLKID_GCLK_VENCI_INT] = &gxbb_gclk_vencp_int.hw, + [CLKID_DAC_CLK] = &gxbb_dac_clk.hw, + [CLKID_AOCLK_GATE] = &gxbb_aoclk_gate.hw, + [CLKID_IEC958_GATE] = &gxbb_iec958_gate.hw, + [CLKID_ENC480P] = &gxbb_enc480p.hw, + [CLKID_RNG1] = &gxbb_rng1.hw, + [CLKID_GCLK_VENCI_INT1] = &gxbb_gclk_venci_int1.hw, + [CLKID_VCLK2_VENCLMCC] = &gxbb_vclk2_venclmcc.hw, + [CLKID_VCLK2_VENCL] = &gxbb_vclk2_vencl.hw, + [CLKID_VCLK_OTHER] = &gxbb_vclk_other.hw, + [CLKID_EDP] = &gxbb_edp.hw, + [CLKID_AO_MEDIA_CPU] = &gxbb_ao_media_cpu.hw, + [CLKID_AO_AHB_SRAM] = &gxbb_ao_ahb_sram.hw, + [CLKID_AO_AHB_BUS] = &gxbb_ao_ahb_bus.hw, + [CLKID_AO_IFACE] = &gxbb_ao_iface.hw, + [CLKID_AO_I2C] = &gxbb_ao_i2c.hw, + [CLKID_SD_EMMC_A] = &gxbb_emmc_a.hw, + [CLKID_SD_EMMC_B] = &gxbb_emmc_b.hw, + [CLKID_SD_EMMC_C] = &gxbb_emmc_c.hw, + [CLKID_SAR_ADC_CLK] = &gxbb_sar_adc_clk.hw, + [CLKID_SAR_ADC_SEL] = &gxbb_sar_adc_clk_sel.hw, + [CLKID_SAR_ADC_DIV] = &gxbb_sar_adc_clk_div.hw, + [CLKID_MALI_0_SEL] = &gxbb_mali_0_sel.hw, + [CLKID_MALI_0_DIV] = &gxbb_mali_0_div.hw, + [CLKID_MALI_0] = &gxbb_mali_0.hw, + [CLKID_MALI_1_SEL] = &gxbb_mali_1_sel.hw, + [CLKID_MALI_1_DIV] = &gxbb_mali_1_div.hw, + [CLKID_MALI_1] = &gxbb_mali_1.hw, + [CLKID_MALI] = &gxbb_mali.hw, + [CLKID_CTS_AMCLK] = &gxbb_cts_amclk.hw, + [CLKID_CTS_AMCLK_SEL] = &gxbb_cts_amclk_sel.hw, + [CLKID_CTS_AMCLK_DIV] = &gxbb_cts_amclk_div.hw, + [CLKID_CTS_MCLK_I958] = &gxbb_cts_mclk_i958.hw, + [CLKID_CTS_MCLK_I958_SEL] = &gxbb_cts_mclk_i958_sel.hw, + [CLKID_CTS_MCLK_I958_DIV] = &gxbb_cts_mclk_i958_div.hw, + [CLKID_CTS_I958] = &gxbb_cts_i958.hw, }, .num = NR_CLKS, }; @@ -810,13 +1291,20 @@ static struct meson_clk_pll *const gxbb_clk_plls[] = { &gxbb_gp0_pll, }; +static struct meson_clk_pll *const gxl_clk_plls[] = { + &gxbb_fixed_pll, + &gxbb_hdmi_pll, + &gxbb_sys_pll, + &gxl_gp0_pll, +}; + static struct meson_clk_mpll *const gxbb_clk_mplls[] = { &gxbb_mpll0, &gxbb_mpll1, &gxbb_mpll2, }; -static struct clk_gate *gxbb_clk_gates[] = { +static struct clk_gate *const gxbb_clk_gates[] = { &gxbb_clk81, &gxbb_ddr, &gxbb_dos, @@ -900,16 +1388,105 @@ static struct clk_gate *gxbb_clk_gates[] = { &gxbb_emmc_b, &gxbb_emmc_c, &gxbb_sar_adc_clk, + &gxbb_mali_0, + &gxbb_mali_1, + &gxbb_cts_amclk, + &gxbb_cts_mclk_i958, +}; + +static struct clk_mux *const gxbb_clk_muxes[] = { + &gxbb_mpeg_clk_sel, + &gxbb_sar_adc_clk_sel, + &gxbb_mali_0_sel, + &gxbb_mali_1_sel, + &gxbb_mali, + &gxbb_cts_amclk_sel, + &gxbb_cts_mclk_i958_sel, + &gxbb_cts_i958, +}; + +static struct clk_divider *const gxbb_clk_dividers[] = { + &gxbb_mpeg_clk_div, + &gxbb_sar_adc_clk_div, + &gxbb_mali_0_div, + &gxbb_mali_1_div, + &gxbb_cts_mclk_i958_div, +}; + +static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = { + &gxbb_cts_amclk_div, +}; + +struct clkc_data { + struct clk_gate *const *clk_gates; + unsigned int clk_gates_count; + struct meson_clk_mpll 
*const *clk_mplls; + unsigned int clk_mplls_count; + struct meson_clk_pll *const *clk_plls; + unsigned int clk_plls_count; + struct clk_mux *const *clk_muxes; + unsigned int clk_muxes_count; + struct clk_divider *const *clk_dividers; + unsigned int clk_dividers_count; + struct meson_clk_audio_divider *const *clk_audio_dividers; + unsigned int clk_audio_dividers_count; + struct meson_clk_cpu *cpu_clk; + struct clk_hw_onecell_data *hw_onecell_data; +}; + +static const struct clkc_data gxbb_clkc_data = { + .clk_gates = gxbb_clk_gates, + .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates), + .clk_mplls = gxbb_clk_mplls, + .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls), + .clk_plls = gxbb_clk_plls, + .clk_plls_count = ARRAY_SIZE(gxbb_clk_plls), + .clk_muxes = gxbb_clk_muxes, + .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes), + .clk_dividers = gxbb_clk_dividers, + .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers), + .clk_audio_dividers = gxbb_audio_dividers, + .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers), + .cpu_clk = &gxbb_cpu_clk, + .hw_onecell_data = &gxbb_hw_onecell_data, +}; + +static const struct clkc_data gxl_clkc_data = { + .clk_gates = gxbb_clk_gates, + .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates), + .clk_mplls = gxbb_clk_mplls, + .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls), + .clk_plls = gxl_clk_plls, + .clk_plls_count = ARRAY_SIZE(gxl_clk_plls), + .clk_muxes = gxbb_clk_muxes, + .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes), + .clk_dividers = gxbb_clk_dividers, + .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers), + .clk_audio_dividers = gxbb_audio_dividers, + .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers), + .cpu_clk = &gxbb_cpu_clk, + .hw_onecell_data = &gxl_hw_onecell_data, +}; + +static const struct of_device_id clkc_match_table[] = { + { .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data }, + { .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data }, + {}, }; static int gxbb_clkc_probe(struct platform_device *pdev) { + const struct clkc_data *clkc_data; void __iomem *clk_base; int ret, clkid, i; struct clk_hw *parent_hw; struct clk *parent_clk; struct device *dev = &pdev->dev; + clkc_data = of_device_get_match_data(&pdev->dev); + if (!clkc_data) + return -EINVAL; + /* Generic clocks and PLLs */ clk_base = of_iomap(dev->of_node, 0); if (!clk_base) { @@ -918,34 +1495,45 @@ static int gxbb_clkc_probe(struct platform_device *pdev) } /* Populate base address for PLLs */ - for (i = 0; i < ARRAY_SIZE(gxbb_clk_plls); i++) - gxbb_clk_plls[i]->base = clk_base; + for (i = 0; i < clkc_data->clk_plls_count; i++) + clkc_data->clk_plls[i]->base = clk_base; /* Populate base address for MPLLs */ - for (i = 0; i < ARRAY_SIZE(gxbb_clk_mplls); i++) - gxbb_clk_mplls[i]->base = clk_base; + for (i = 0; i < clkc_data->clk_mplls_count; i++) + clkc_data->clk_mplls[i]->base = clk_base; /* Populate the base address for CPU clk */ - gxbb_cpu_clk.base = clk_base; + clkc_data->cpu_clk->base = clk_base; + + /* Populate base address for gates */ + for (i = 0; i < clkc_data->clk_gates_count; i++) + clkc_data->clk_gates[i]->reg = clk_base + + (u64)clkc_data->clk_gates[i]->reg; - /* Populate the base address for the MPEG clks */ - gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg; - gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg; + /* Populate base address for muxes */ + for (i = 0; i < clkc_data->clk_muxes_count; i++) + clkc_data->clk_muxes[i]->reg = clk_base + + (u64)clkc_data->clk_muxes[i]->reg; - /* Populate the base address for the SAR ADC clks */ - 
gxbb_sar_adc_clk_sel.reg = clk_base + (u64)gxbb_sar_adc_clk_sel.reg; - gxbb_sar_adc_clk_div.reg = clk_base + (u64)gxbb_sar_adc_clk_div.reg; + /* Populate base address for dividers */ + for (i = 0; i < clkc_data->clk_dividers_count; i++) + clkc_data->clk_dividers[i]->reg = clk_base + + (u64)clkc_data->clk_dividers[i]->reg; - /* Populate base address for gates */ - for (i = 0; i < ARRAY_SIZE(gxbb_clk_gates); i++) - gxbb_clk_gates[i]->reg = clk_base + - (u64)gxbb_clk_gates[i]->reg; + /* Populate base address for the audio dividers */ + for (i = 0; i < clkc_data->clk_audio_dividers_count; i++) + clkc_data->clk_audio_dividers[i]->base = clk_base; /* * register all clks */ - for (clkid = 0; clkid < NR_CLKS; clkid++) { - ret = devm_clk_hw_register(dev, gxbb_hw_onecell_data.hws[clkid]); + for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) { + /* array might be sparse */ + if (!clkc_data->hw_onecell_data->hws[clkid]) + continue; + + ret = devm_clk_hw_register(dev, + clkc_data->hw_onecell_data->hws[clkid]); if (ret) goto iounmap; } @@ -964,9 +1552,9 @@ static int gxbb_clkc_probe(struct platform_device *pdev) * a new clk_hw, and this hack will no longer work. Releasing the ccr * feature before that time solves the problem :-) */ - parent_hw = clk_hw_get_parent(&gxbb_cpu_clk.hw); + parent_hw = clk_hw_get_parent(&clkc_data->cpu_clk->hw); parent_clk = parent_hw->clk; - ret = clk_notifier_register(parent_clk, &gxbb_cpu_clk.clk_nb); + ret = clk_notifier_register(parent_clk, &clkc_data->cpu_clk->clk_nb); if (ret) { pr_err("%s: failed to register clock notifier for cpu_clk\n", __func__); @@ -974,23 +1562,18 @@ static int gxbb_clkc_probe(struct platform_device *pdev) } return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, - &gxbb_hw_onecell_data); + clkc_data->hw_onecell_data); iounmap: iounmap(clk_base); return ret; } -static const struct of_device_id gxbb_clkc_match_table[] = { - { .compatible = "amlogic,gxbb-clkc" }, - { } -}; - static struct platform_driver gxbb_driver = { .probe = gxbb_clkc_probe, .driver = { .name = "gxbb-clkc", - .of_match_table = gxbb_clkc_match_table, + .of_match_table = clkc_match_table, }, }; diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index 945aefa4d251..93b8f07ee7af 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -71,6 +71,8 @@ #define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ #define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ #define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */ +#define HHI_GP0_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */ +#define HHI_GP0_PLL_CNTL1 0x58 /* 0x16 offset in data sheet */ #define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */ #define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */ @@ -275,8 +277,15 @@ #define CLKID_MALI_1_DIV 104 /* CLKID_MALI_1 */ /* CLKID_MALI */ +#define CLKID_CTS_AMCLK 107 +#define CLKID_CTS_AMCLK_SEL 108 +#define CLKID_CTS_AMCLK_DIV 109 +#define CLKID_CTS_MCLK_I958 110 +#define CLKID_CTS_MCLK_I958_SEL 111 +#define CLKID_CTS_MCLK_I958_DIV 112 +#define CLKID_CTS_I958 113 -#define NR_CLKS 107 +#define NR_CLKS 114 /* include the CLKIDs that have been made part of the stable DT binding */ #include <dt-bindings/clock/gxbb-clkc.h> diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 888494d4fb8a..e9985503165c 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -245,6 +245,96 @@ static struct clk_fixed_factor meson8b_fclk_div7 = { }, }; +static struct meson_clk_mpll 
meson8b_mpll0 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 0, + .width = 14, + }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 15, + .width = 1, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 16, + .width = 9, + }, + .en = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 14, + .width = 1, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll0", + .ops = &meson_clk_mpll_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct meson_clk_mpll meson8b_mpll1 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 0, + .width = 14, + }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 15, + .width = 1, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 16, + .width = 9, + }, + .en = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 14, + .width = 1, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll1", + .ops = &meson_clk_mpll_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct meson_clk_mpll meson8b_mpll2 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 0, + .width = 14, + }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 15, + .width = 1, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 16, + .width = 9, + }, + .en = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 14, + .width = 1, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll2", + .ops = &meson_clk_mpll_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + /* * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL * post-dividers and should be modeled with their respective PLLs via the @@ -491,6 +581,9 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw, [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw, [CLKID_AO_IFACE] = &meson8b_ao_iface.hw, + [CLKID_MPLL0] = &meson8b_mpll0.hw, + [CLKID_MPLL1] = &meson8b_mpll1.hw, + [CLKID_MPLL2] = &meson8b_mpll2.hw, }, .num = CLK_NR_CLKS, }; @@ -501,7 +594,13 @@ static struct meson_clk_pll *const meson8b_clk_plls[] = { &meson8b_sys_pll, }; -static struct clk_gate *meson8b_clk_gates[] = { +static struct meson_clk_mpll *const meson8b_clk_mplls[] = { + &meson8b_mpll0, + &meson8b_mpll1, + &meson8b_mpll2, +}; + +static struct clk_gate *const meson8b_clk_gates[] = { &meson8b_clk81, &meson8b_ddr, &meson8b_dos, @@ -582,6 +681,14 @@ static struct clk_gate *meson8b_clk_gates[] = { &meson8b_ao_iface, }; +static struct clk_mux *const meson8b_clk_muxes[] = { + &meson8b_mpeg_clk_sel, +}; + +static struct clk_divider *const meson8b_clk_dividers[] = { + &meson8b_mpeg_clk_div, +}; + static int meson8b_clkc_probe(struct platform_device *pdev) { void __iomem *clk_base; @@ -601,18 +708,28 @@ static int meson8b_clkc_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++) meson8b_clk_plls[i]->base = clk_base; + /* Populate base address for MPLLs */ + for (i = 0; i < ARRAY_SIZE(meson8b_clk_mplls); i++) + meson8b_clk_mplls[i]->base = clk_base; + /* Populate the base address for CPU clk */ meson8b_cpu_clk.base = clk_base; - /* Populate the base address for the MPEG clks */ - meson8b_mpeg_clk_sel.reg = clk_base + (u32)meson8b_mpeg_clk_sel.reg; - meson8b_mpeg_clk_div.reg = clk_base + (u32)meson8b_mpeg_clk_div.reg; - /* Populate base address for gates */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++) meson8b_clk_gates[i]->reg = clk_base + (u32)meson8b_clk_gates[i]->reg; + /* 
Populate base address for muxes */ + for (i = 0; i < ARRAY_SIZE(meson8b_clk_muxes); i++) + meson8b_clk_muxes[i]->reg = clk_base + + (u32)meson8b_clk_muxes[i]->reg; + + /* Populate base address for dividers */ + for (i = 0; i < ARRAY_SIZE(meson8b_clk_dividers); i++) + meson8b_clk_dividers[i]->reg = clk_base + + (u32)meson8b_clk_dividers[i]->reg; + /* * register all clks * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1 diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index 010e9582888d..3881defc8644 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -42,6 +42,21 @@ #define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */ /* + * MPLL register offeset taken from the S905 datasheet. Vendor kernel source + * confirm these are the same for the S805. + */ +#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ +#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */ +#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */ +#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */ +#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */ +#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */ +#define HHI_MPLL_CNTL7 0x298 /* 0xa6 offset in data sheet */ +#define HHI_MPLL_CNTL8 0x29C /* 0xa7 offset in data sheet */ +#define HHI_MPLL_CNTL9 0x2A0 /* 0xa8 offset in data sheet */ +#define HHI_MPLL_CNTL10 0x2A4 /* 0xa9 offset in data sheet */ + +/* * CLKID index values * * These indices are entirely contrived and do not map onto the hardware. @@ -142,8 +157,11 @@ #define CLKID_AO_AHB_SRAM 90 #define CLKID_AO_AHB_BUS 91 #define CLKID_AO_IFACE 92 +#define CLKID_MPLL0 93 +#define CLKID_MPLL1 94 +#define CLKID_MPLL2 95 -#define CLK_NR_CLKS 93 +#define CLK_NR_CLKS 96 /* include the CLKIDs that have been made part of the stable DT binding */ #include <dt-bindings/clock/meson8b-clkc.h> diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 044892b6534d..072aa38374ce 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -186,11 +186,11 @@ static void __init of_cpu_clk_setup(struct device_node *node) for_each_node_by_type(dn, "cpu") ncpus++; - cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL); + cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL); if (WARN_ON(!cpuclk)) goto cpuclk_out; - clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL); + clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL); if (WARN_ON(!clks)) goto clks_out; diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c index 66be2e0c82b4..472c88b90256 100644 --- a/drivers/clk/mvebu/common.c +++ b/drivers/clk/mvebu/common.c @@ -126,7 +126,7 @@ void __init mvebu_coreclk_setup(struct device_node *np, if (desc->get_refclk_freq) clk_data.clk_num += 1; - clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *), + clk_data.clks = kcalloc(clk_data.clk_num, sizeof(*clk_data.clks), GFP_KERNEL); if (WARN_ON(!clk_data.clks)) { iounmap(base); @@ -270,7 +270,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np, n++; ctrl->num_gates = n; - ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *), + ctrl->gates = kcalloc(ctrl->num_gates, sizeof(*ctrl->gates), GFP_KERNEL); if (WARN_ON(!ctrl->gates)) goto gates_out; diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 3487c267833e..d990fe44aef3 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -165,7 +165,7 @@ static int clk_smd_rpm_handoff(struct clk_smd_rpm *r) struct 
clk_smd_rpm_req req = { .key = cpu_to_le32(r->rpm_key), .nbytes = cpu_to_le32(sizeof(u32)), - .value = cpu_to_le32(INT_MAX), + .value = cpu_to_le32(r->branch ? 1 : INT_MAX), }; ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c index 9b97246287a7..352394d8fd8c 100644 --- a/drivers/clk/qcom/mmcc-msm8996.c +++ b/drivers/clk/qcom/mmcc-msm8996.c @@ -2944,6 +2944,7 @@ static struct gdsc venus_core0_gdsc = { .pd = { .name = "venus_core0", }, + .parent = &venus_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, .flags = HW_CTRL, }; @@ -2955,6 +2956,7 @@ static struct gdsc venus_core1_gdsc = { .pd = { .name = "venus_core1", }, + .parent = &venus_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, .flags = HW_CTRL, }; @@ -2986,7 +2988,7 @@ static struct gdsc vfe1_gdsc = { .cxcs = (unsigned int []){ 0x36ac }, .cxc_count = 1, .pd = { - .name = "vfe0", + .name = "vfe1", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index bfffdb00df97..eaa98b488f01 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/soc/renesas/rcar-rst.h> +#include <linux/sys_soc.h> #include <dt-bindings/clock/r8a7795-cpg-mssr.h> @@ -24,7 +25,7 @@ enum clk_ids { /* Core Clock Outputs exported to DT */ - LAST_DT_CORE_CLK = R8A7795_CLK_OSC, + LAST_DT_CORE_CLK = R8A7795_CLK_S0D12, /* External Input Clocks */ CLK_EXTAL, @@ -51,10 +52,10 @@ enum clk_ids { MOD_CLK_BASE }; -static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { +static struct cpg_core_clk r8a7795_core_clks[] __initdata = { /* External Clock Inputs */ - DEF_INPUT("extal", CLK_EXTAL), - DEF_INPUT("extalr", CLK_EXTALR), + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), @@ -78,7 +79,12 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1), DEF_FIXED("zx", R8A7795_CLK_ZX, CLK_PLL1_DIV2, 2, 1), DEF_FIXED("s0d1", R8A7795_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A7795_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A7795_CLK_S0D3, CLK_S0, 3, 1), DEF_FIXED("s0d4", R8A7795_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A7795_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A7795_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A7795_CLK_S0D12, CLK_S0, 12, 1), DEF_FIXED("s1d1", R8A7795_CLK_S1D1, CLK_S1, 1, 1), DEF_FIXED("s1d2", R8A7795_CLK_S1D2, CLK_S1, 2, 1), DEF_FIXED("s1d4", R8A7795_CLK_S1D4, CLK_S1, 4, 1), @@ -89,29 +95,29 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1), DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1), - DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x0074), - DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x0078), - DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x0268), - DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x026c), + DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c), DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1), - DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014), - 
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250), DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244), DEF_DIV6P1("csi0", R8A7795_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014), + DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250), - DEF_DIV6_RO("osc", R8A7795_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), + DEF_DIV6_RO("osc", R8A7795_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), - DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), + DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), }; -static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { - DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), - DEF_MOD("fdp1-1", 118, R8A7795_CLK_S2D1), - DEF_MOD("fdp1-0", 119, R8A7795_CLK_S2D1), +static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { + DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1), + DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1), DEF_MOD("scif5", 202, R8A7795_CLK_S3D4), DEF_MOD("scif4", 203, R8A7795_CLK_S3D4), DEF_MOD("scif3", 204, R8A7795_CLK_S3D4), @@ -121,9 +127,9 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("msiof2", 209, R8A7795_CLK_MSO), DEF_MOD("msiof1", 210, R8A7795_CLK_MSO), DEF_MOD("msiof0", 211, R8A7795_CLK_MSO), - DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1), - DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1), - DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S3D1), + DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3), + DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3), + DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3), DEF_MOD("cmt3", 300, R8A7795_CLK_R), DEF_MOD("cmt2", 301, R8A7795_CLK_R), DEF_MOD("cmt1", 302, R8A7795_CLK_R), @@ -135,15 +141,15 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("sdif0", 314, R8A7795_CLK_SD0), DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1), DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1), - DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), + DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */ DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1), DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1), DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1), - DEF_MOD("rwdt0", 402, R8A7795_CLK_R), + DEF_MOD("rwdt", 402, R8A7795_CLK_R), DEF_MOD("intc-ex", 407, R8A7795_CLK_CP), DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1), - DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4), - DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4), + DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3), + DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3), DEF_MOD("drif7", 508, R8A7795_CLK_S3D2), DEF_MOD("drif6", 509, R8A7795_CLK_S3D2), DEF_MOD("drif5", 510, R8A7795_CLK_S3D2), @@ -159,35 +165,35 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1), DEF_MOD("thermal", 522, R8A7795_CLK_CP), DEF_MOD("pwm", 523, R8A7795_CLK_S3D4), - DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), - DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1), - DEF_MOD("fcpvd1", 602, R8A7795_CLK_S2D1), - DEF_MOD("fcpvd0", 603, R8A7795_CLK_S2D1), - DEF_MOD("fcpvb1", 606, R8A7795_CLK_S2D1), - DEF_MOD("fcpvb0", 607, R8A7795_CLK_S2D1), - DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), - DEF_MOD("fcpvi1", 610, R8A7795_CLK_S2D1), - DEF_MOD("fcpvi0", 611, R8A7795_CLK_S2D1), - DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), - DEF_MOD("fcpf1", 614, R8A7795_CLK_S2D1), - DEF_MOD("fcpf0", 615, R8A7795_CLK_S2D1), - DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), - DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), - DEF_MOD("fcpcs", 619, R8A7795_CLK_S2D1), - DEF_MOD("vspd3", 620, 
R8A7795_CLK_S2D1), - DEF_MOD("vspd2", 621, R8A7795_CLK_S2D1), - DEF_MOD("vspd1", 622, R8A7795_CLK_S2D1), - DEF_MOD("vspd0", 623, R8A7795_CLK_S2D1), - DEF_MOD("vspbc", 624, R8A7795_CLK_S2D1), - DEF_MOD("vspbd", 626, R8A7795_CLK_S2D1), - DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), - DEF_MOD("vspi1", 630, R8A7795_CLK_S2D1), - DEF_MOD("vspi0", 631, R8A7795_CLK_S2D1), + DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2), + DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2), + DEF_MOD("fcpvd0", 603, R8A7795_CLK_S0D2), + DEF_MOD("fcpvb1", 606, R8A7795_CLK_S0D1), + DEF_MOD("fcpvb0", 607, R8A7795_CLK_S0D1), + DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fcpvi1", 610, R8A7795_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A7795_CLK_S0D1), + DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fcpf1", 614, R8A7795_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A7795_CLK_S0D1), + DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("fcpcs", 619, R8A7795_CLK_S0D1), + DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("vspd2", 621, R8A7795_CLK_S0D2), + DEF_MOD("vspd1", 622, R8A7795_CLK_S0D2), + DEF_MOD("vspd0", 623, R8A7795_CLK_S0D2), + DEF_MOD("vspbc", 624, R8A7795_CLK_S0D1), + DEF_MOD("vspbd", 626, R8A7795_CLK_S0D1), + DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */ + DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1), DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4), DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4), DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4), DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4), - DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), + DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0), DEF_MOD("csi41", 715, R8A7795_CLK_CSI0), DEF_MOD("csi40", 716, R8A7795_CLK_CSI0), @@ -198,16 +204,20 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("lvds", 727, R8A7795_CLK_S0D4), DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI), DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI), - DEF_MOD("vin7", 804, R8A7795_CLK_S2D1), - DEF_MOD("vin6", 805, R8A7795_CLK_S2D1), - DEF_MOD("vin5", 806, R8A7795_CLK_S2D1), - DEF_MOD("vin4", 807, R8A7795_CLK_S2D1), - DEF_MOD("vin3", 808, R8A7795_CLK_S2D1), - DEF_MOD("vin2", 809, R8A7795_CLK_S2D1), - DEF_MOD("vin1", 810, R8A7795_CLK_S2D1), - DEF_MOD("vin0", 811, R8A7795_CLK_S2D1), - DEF_MOD("etheravb", 812, R8A7795_CLK_S3D2), + DEF_MOD("vin7", 804, R8A7795_CLK_S0D2), + DEF_MOD("vin6", 805, R8A7795_CLK_S0D2), + DEF_MOD("vin5", 806, R8A7795_CLK_S0D2), + DEF_MOD("vin4", 807, R8A7795_CLK_S0D2), + DEF_MOD("vin3", 808, R8A7795_CLK_S0D2), + DEF_MOD("vin2", 809, R8A7795_CLK_S0D2), + DEF_MOD("vin1", 810, R8A7795_CLK_S0D2), + DEF_MOD("vin0", 811, R8A7795_CLK_S0D2), + DEF_MOD("etheravb", 812, R8A7795_CLK_S0D6), DEF_MOD("sata0", 815, R8A7795_CLK_S3D2), + DEF_MOD("imr3", 820, R8A7795_CLK_S0D2), + DEF_MOD("imr2", 821, R8A7795_CLK_S0D2), + DEF_MOD("imr1", 822, R8A7795_CLK_S0D2), + DEF_MOD("imr0", 823, R8A7795_CLK_S0D2), DEF_MOD("gpio7", 905, R8A7795_CLK_CP), DEF_MOD("gpio6", 906, R8A7795_CLK_CP), DEF_MOD("gpio5", 907, R8A7795_CLK_CP), @@ -310,6 +320,82 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { { 2, 192, 192, }, }; +static const struct soc_device_attribute r8a7795es1[] __initconst = { + { .soc_id = "r8a7795", .revision = "ES1.*" }, + { /* sentinel */ } +}; + + + /* + * Fixups for R-Car H3 ES1.x + */ + +static const unsigned int r8a7795es1_mod_nullify[] __initconst 
= { + MOD_CLK_ID(326), /* USB-DMAC3-0 */ + MOD_CLK_ID(329), /* USB-DMAC3-1 */ + MOD_CLK_ID(700), /* EHCI/OHCI3 */ + MOD_CLK_ID(705), /* HS-USB-IF3 */ + +}; + +static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = { + { MOD_CLK_ID(118), R8A7795_CLK_S2D1 }, /* FDP1-1 */ + { MOD_CLK_ID(119), R8A7795_CLK_S2D1 }, /* FDP1-0 */ + { MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */ + { MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */ + { MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */ + { MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */ + { MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */ + { MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */ + { MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */ + { MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */ + { MOD_CLK_ID(606), R8A7795_CLK_S2D1 }, /* FCPVB1 */ + { MOD_CLK_ID(607), R8A7795_CLK_S2D1 }, /* FCPVB0 */ + { MOD_CLK_ID(610), R8A7795_CLK_S2D1 }, /* FCPVI1 */ + { MOD_CLK_ID(611), R8A7795_CLK_S2D1 }, /* FCPVI0 */ + { MOD_CLK_ID(614), R8A7795_CLK_S2D1 }, /* FCPF1 */ + { MOD_CLK_ID(615), R8A7795_CLK_S2D1 }, /* FCPF0 */ + { MOD_CLK_ID(619), R8A7795_CLK_S2D1 }, /* FCPCS */ + { MOD_CLK_ID(621), R8A7795_CLK_S2D1 }, /* VSPD2 */ + { MOD_CLK_ID(622), R8A7795_CLK_S2D1 }, /* VSPD1 */ + { MOD_CLK_ID(623), R8A7795_CLK_S2D1 }, /* VSPD0 */ + { MOD_CLK_ID(624), R8A7795_CLK_S2D1 }, /* VSPBC */ + { MOD_CLK_ID(626), R8A7795_CLK_S2D1 }, /* VSPBD */ + { MOD_CLK_ID(630), R8A7795_CLK_S2D1 }, /* VSPI1 */ + { MOD_CLK_ID(631), R8A7795_CLK_S2D1 }, /* VSPI0 */ + { MOD_CLK_ID(804), R8A7795_CLK_S2D1 }, /* VIN7 */ + { MOD_CLK_ID(805), R8A7795_CLK_S2D1 }, /* VIN6 */ + { MOD_CLK_ID(806), R8A7795_CLK_S2D1 }, /* VIN5 */ + { MOD_CLK_ID(807), R8A7795_CLK_S2D1 }, /* VIN4 */ + { MOD_CLK_ID(808), R8A7795_CLK_S2D1 }, /* VIN3 */ + { MOD_CLK_ID(809), R8A7795_CLK_S2D1 }, /* VIN2 */ + { MOD_CLK_ID(810), R8A7795_CLK_S2D1 }, /* VIN1 */ + { MOD_CLK_ID(811), R8A7795_CLK_S2D1 }, /* VIN0 */ + { MOD_CLK_ID(812), R8A7795_CLK_S3D2 }, /* EAVB-IF */ + { MOD_CLK_ID(820), R8A7795_CLK_S2D1 }, /* IMR3 */ + { MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */ + { MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */ + { MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */ +}; + + + /* + * Fixups for R-Car H3 ES2.x + */ + +static const unsigned int r8a7795es2_mod_nullify[] __initconst = { + MOD_CLK_ID(117), /* FDP1-2 */ + MOD_CLK_ID(327), /* USB3-IF1 */ + MOD_CLK_ID(600), /* FCPVD3 */ + MOD_CLK_ID(609), /* FCPVI2 */ + MOD_CLK_ID(613), /* FCPF2 */ + MOD_CLK_ID(616), /* FCPCI1 */ + MOD_CLK_ID(617), /* FCPCI0 */ + MOD_CLK_ID(620), /* VSPD3 */ + MOD_CLK_ID(629), /* VSPI2 */ + MOD_CLK_ID(713), /* CSI21 */ +}; + static int __init r8a7795_cpg_mssr_init(struct device *dev) { const struct rcar_gen3_cpg_pll_config *cpg_pll_config; @@ -326,7 +412,26 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev) return -EINVAL; } - return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR); + if (soc_device_match(r8a7795es1)) { + cpg_core_nullify_range(r8a7795_core_clks, + ARRAY_SIZE(r8a7795_core_clks), + R8A7795_CLK_S0D2, R8A7795_CLK_S0D12); + mssr_mod_nullify(r8a7795_mod_clks, + ARRAY_SIZE(r8a7795_mod_clks), + r8a7795es1_mod_nullify, + ARRAY_SIZE(r8a7795es1_mod_nullify)); + mssr_mod_reparent(r8a7795_mod_clks, + ARRAY_SIZE(r8a7795_mod_clks), + r8a7795es1_mod_reparent, + ARRAY_SIZE(r8a7795es1_mod_reparent)); + } else { + mssr_mod_nullify(r8a7795_mod_clks, + ARRAY_SIZE(r8a7795_mod_clks), + r8a7795es2_mod_nullify, + ARRAY_SIZE(r8a7795es2_mod_nullify)); + } + + return rcar_gen3_cpg_init(cpg_pll_config, 
CLK_EXTALR, cpg_mode); } const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = { diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index 11e084a56b0d..9d114b31b073 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -54,8 +54,8 @@ enum clk_ids { static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { /* External Clock Inputs */ - DEF_INPUT("extal", CLK_EXTAL), - DEF_INPUT("extalr", CLK_EXTALR), + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), @@ -95,10 +95,10 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1), DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1), - DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, CLK_SDSRC, 0x0074), - DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, CLK_SDSRC, 0x0078), - DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, CLK_SDSRC, 0x0268), - DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x026c), + DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c), DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1), @@ -135,7 +135,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("sdif2", 312, R8A7796_CLK_SD2), DEF_MOD("sdif1", 313, R8A7796_CLK_SD1), DEF_MOD("sdif0", 314, R8A7796_CLK_SD0), - DEF_MOD("rwdt0", 402, R8A7796_CLK_R), + DEF_MOD("rwdt", 402, R8A7796_CLK_R), DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1), DEF_MOD("drif7", 508, R8A7796_CLK_S3D2), DEF_MOD("drif6", 509, R8A7796_CLK_S3D2), @@ -179,6 +179,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("vin1", 810, R8A7796_CLK_S0D2), DEF_MOD("vin0", 811, R8A7796_CLK_S0D2), DEF_MOD("etheravb", 812, R8A7796_CLK_S0D6), + DEF_MOD("imr1", 822, R8A7796_CLK_S0D2), + DEF_MOD("imr0", 823, R8A7796_CLK_S0D2), DEF_MOD("gpio7", 905, R8A7796_CLK_S3D4), DEF_MOD("gpio6", 906, R8A7796_CLK_S3D4), DEF_MOD("gpio5", 907, R8A7796_CLK_S3D4), @@ -271,7 +273,7 @@ static int __init r8a7796_cpg_mssr_init(struct device *dev) return -EINVAL; } - return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR); + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); } const struct cpg_mssr_info r8a7796_cpg_mssr_info __initconst = { diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 742f6dc7c156..3dee900522b7 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/sys_soc.h> #include "renesas-cpg-mssr.h" #include "rcar-gen3-cpg.h" @@ -247,6 +248,27 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; static unsigned int cpg_clk_extalr __initdata; +static u32 cpg_mode __initdata; +static u32 cpg_quirks __initdata; + +#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */ +#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */ + +static const struct soc_device_attribute cpg_quirks_match[] __initconst = { + { + .soc_id = "r8a7795", .revision = "ES1.0", + .data = (void *)(PLL_ERRATA | RCKCR_CKSEL), + }, + { + .soc_id = "r8a7795", 
.revision = "ES1.*", + .data = (void *)RCKCR_CKSEL, + }, + { + .soc_id = "r8a7796", .revision = "ES1.0", + .data = (void *)RCKCR_CKSEL, + }, + { /* sentinel */ } +}; struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core, const struct cpg_mssr_info *info, @@ -275,6 +297,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, */ value = readl(base + CPG_PLL0CR); mult = (((value >> 24) & 0x7f) + 1) * 2; + if (cpg_quirks & PLL_ERRATA) + mult *= 2; break; case CLK_TYPE_GEN3_PLL1: @@ -290,6 +314,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, */ value = readl(base + CPG_PLL2CR); mult = (((value >> 24) & 0x7f) + 1) * 2; + if (cpg_quirks & PLL_ERRATA) + mult *= 2; break; case CLK_TYPE_GEN3_PLL3: @@ -305,24 +331,33 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, */ value = readl(base + CPG_PLL4CR); mult = (((value >> 24) & 0x7f) + 1) * 2; + if (cpg_quirks & PLL_ERRATA) + mult *= 2; break; case CLK_TYPE_GEN3_SD: return cpg_sd_clk_register(core, base, __clk_get_name(parent)); case CLK_TYPE_GEN3_R: - /* - * RINT is default. - * Only if EXTALR is populated, we switch to it. - */ - value = readl(base + CPG_RCKCR) & 0x3f; - - if (clk_get_rate(clks[cpg_clk_extalr])) { - parent = clks[cpg_clk_extalr]; - value |= BIT(15); + if (cpg_quirks & RCKCR_CKSEL) { + /* + * RINT is default. + * Only if EXTALR is populated, we switch to it. + */ + value = readl(base + CPG_RCKCR) & 0x3f; + + if (clk_get_rate(clks[cpg_clk_extalr])) { + parent = clks[cpg_clk_extalr]; + value |= BIT(15); + } + + writel(value, base + CPG_RCKCR); + break; } - writel(value, base + CPG_RCKCR); + /* Select parent clock of RCLK by MD28 */ + if (cpg_mode & BIT(28)) + parent = clks[cpg_clk_extalr]; break; default: @@ -334,9 +369,16 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, } int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config, - unsigned int clk_extalr) + unsigned int clk_extalr, u32 mode) { + const struct soc_device_attribute *attr; + cpg_pll_config = config; cpg_clk_extalr = clk_extalr; + cpg_mode = mode; + attr = soc_device_match(cpg_quirks_match); + if (attr) + cpg_quirks = (uintptr_t)attr->data; + pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks); return 0; } diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index f788f481dd42..073be54b5d03 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -37,6 +37,6 @@ struct clk *rcar_gen3_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core, const struct cpg_mssr_info *info, struct clk **clks, void __iomem *base); int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config, - unsigned int clk_extalr); + unsigned int clk_extalr, u32 mode); #endif diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index eadcbd43ff88..99eeec6f24ec 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -265,6 +265,11 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core, WARN_DEBUG(id >= priv->num_core_clks); WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); + if (!core->name) { + /* Skip NULLified clock */ + return; + } + switch (core->type) { case CLK_TYPE_IN: clk = of_clk_get_by_name(priv->dev->of_node, core->name); @@ -335,6 +340,11 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, 
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks); WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); + if (!mod->name) { + /* Skip NULLified clock */ + return; + } + parent = priv->clks[mod->parent]; if (IS_ERR(parent)) { clk = parent; @@ -734,5 +744,45 @@ static int __init cpg_mssr_init(void) subsys_initcall(cpg_mssr_init); +void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks, + unsigned int num_core_clks, + unsigned int first_clk, + unsigned int last_clk) +{ + unsigned int i; + + for (i = 0; i < num_core_clks; i++) + if (core_clks[i].id >= first_clk && + core_clks[i].id <= last_clk) + core_clks[i].name = NULL; +} + +void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks, + unsigned int num_mod_clks, + const unsigned int *clks, unsigned int n) +{ + unsigned int i, j; + + for (i = 0, j = 0; i < num_mod_clks && j < n; i++) + if (mod_clks[i].id == clks[j]) { + mod_clks[i].name = NULL; + j++; + } +} + +void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks, + unsigned int num_mod_clks, + const struct mssr_mod_reparent *clks, + unsigned int n) +{ + unsigned int i, j; + + for (i = 0, j = 0; i < num_mod_clks && j < n; i++) + if (mod_clks[i].id == clks[j].clk) { + mod_clks[i].parent = clks[j].parent; + j++; + } +} + MODULE_DESCRIPTION("Renesas CPG/MSSR Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 4bb7a80c6469..148f4f0aa2a4 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -134,4 +134,26 @@ extern const struct cpg_mssr_info r8a7743_cpg_mssr_info; extern const struct cpg_mssr_info r8a7745_cpg_mssr_info; extern const struct cpg_mssr_info r8a7795_cpg_mssr_info; extern const struct cpg_mssr_info r8a7796_cpg_mssr_info; + + + /* + * Helpers for fixing up clock tables depending on SoC revision + */ + +struct mssr_mod_reparent { + unsigned int clk, parent; +}; + + +extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks, + unsigned int num_core_clks, + unsigned int first_clk, + unsigned int last_clk); +extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks, + unsigned int num_mod_clks, + const unsigned int *clks, unsigned int n); +extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks, + unsigned int num_mod_clks, + const struct mssr_mod_reparent *clks, + unsigned int n); #endif diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index 141971488f40..26b220c988b2 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -12,7 +12,7 @@ obj-y += clk-muxgrf.o obj-y += clk-ddr.o obj-$(CONFIG_RESET_CONTROLLER) += softrst.o -obj-y += clk-rk1108.o +obj-y += clk-rv1108.o obj-y += clk-rk3036.o obj-y += clk-rk3188.o obj-y += clk-rk3228.o diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index eec51893a7e6..dd0433d4753e 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -269,6 +269,7 @@ static int rockchip_rk3036_pll_enable(struct clk_hw *hw) writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0), pll->reg_base + RK3036_PLLCON(1)); + rockchip_pll_wait_lock(pll); return 0; } @@ -501,6 +502,7 @@ static int rockchip_rk3066_pll_enable(struct clk_hw *hw) writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0), pll->reg_base + RK3066_PLLCON(3)); + rockchip_pll_wait_lock(pll); return 0; } @@ -746,6 +748,7 @@ static int rockchip_rk3399_pll_enable(struct clk_hw *hw) writel(HIWORD_UPDATE(0, RK3399_PLLCON3_PWRDOWN, 0), pll->reg_base + 
RK3399_PLLCON(3)); + rockchip_rk3399_pll_wait_lock(pll); return 0; } diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 1e384e143504..b04f29774ee7 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -20,6 +20,7 @@ #include <dt-bindings/clock/rk3328-cru.h> #include "clk.h" +#define RK3328_GRF_SOC_CON4 0x410 #define RK3328_GRF_SOC_STATUS0 0x480 #define RK3328_GRF_MAC_CON1 0x904 #define RK3328_GRF_MAC_CON2 0x908 @@ -214,6 +215,8 @@ PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", "gmac_clkin" }; PNAME(mux_mac2phy_src_p) = { "clk_mac2phy_src", "phy_50m_out" }; +PNAME(mux_mac2io_ext_p) = { "clk_mac2io", + "gmac_clkin" }; static struct rockchip_pll_clock rk3328_pll_clks[] __initdata = { [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p, @@ -680,6 +683,10 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { COMPOSITE(SCLK_MAC2IO_OUT, "clk_mac2io_out", mux_2plls_p, 0, RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3328_CLKGATE_CON(3), 5, GFLAGS), + MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT, + RK3328_GRF_MAC_CON1, 10, 1, MFLAGS), + MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT, + RK3328_GRF_SOC_CON4, 14, 1, MFLAGS), COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0, RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS, @@ -691,6 +698,8 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { COMPOSITE_NOMUX(SCLK_MAC2PHY_OUT, "clk_mac2phy_out", "clk_mac2phy", 0, RK3328_CLKSEL_CON(26), 8, 2, DFLAGS, RK3328_CLKGATE_CON(9), 2, GFLAGS), + MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT, + RK3328_GRF_MAC_CON2, 10, 1, MFLAGS), FACTOR(0, "xin12m", "xin24m", 0, 1, 2), diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index 6cb474c593e7..024762d3214d 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -835,18 +835,18 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS), /* timer gates */ - GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), - GATE(0, "sclk_timer14", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 10, GFLAGS), - GATE(0, "sclk_timer13", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 9, GFLAGS), - GATE(0, "sclk_timer12", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 8, GFLAGS), - GATE(0, "sclk_timer11", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 7, GFLAGS), - GATE(0, "sclk_timer10", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 6, GFLAGS), - GATE(0, "sclk_timer05", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 5, GFLAGS), - GATE(0, "sclk_timer04", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 4, GFLAGS), - GATE(0, "sclk_timer03", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 3, GFLAGS), - GATE(0, "sclk_timer02", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 2, GFLAGS), - GATE(0, "sclk_timer01", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 1, GFLAGS), - GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), + GATE(SCLK_TIMER15, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), + GATE(SCLK_TIMER14, "sclk_timer14", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 10, GFLAGS), + GATE(SCLK_TIMER13, 
"sclk_timer13", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 9, GFLAGS), + GATE(SCLK_TIMER12, "sclk_timer12", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 8, GFLAGS), + GATE(SCLK_TIMER11, "sclk_timer11", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 7, GFLAGS), + GATE(SCLK_TIMER10, "sclk_timer10", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 6, GFLAGS), + GATE(SCLK_TIMER05, "sclk_timer05", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 5, GFLAGS), + GATE(SCLK_TIMER04, "sclk_timer04", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 4, GFLAGS), + GATE(SCLK_TIMER03, "sclk_timer03", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 3, GFLAGS), + GATE(SCLK_TIMER02, "sclk_timer02", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 2, GFLAGS), + GATE(SCLK_TIMER01, "sclk_timer01", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 1, GFLAGS), + GATE(SCLK_TIMER00, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), }; static const char *const rk3368_critical_clocks[] __initconst = { @@ -858,6 +858,9 @@ static const char *const rk3368_critical_clocks[] __initconst = { */ "pclk_pwm1", "pclk_pd_pmu", + "pclk_pd_alive", + "pclk_peri", + "hclk_peri", }; static void __init rk3368_clk_init(struct device_node *np) diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 73121b144634..fa3cbef08776 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -1477,10 +1477,10 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { GATE(PCLK_UART4_PMU, "pclk_uart4_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 14, GFLAGS), GATE(PCLK_WDT_M0_PMU, "pclk_wdt_m0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 15, GFLAGS), - GATE(FCLK_CM0S_PMU, "fclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 0, GFLAGS), - GATE(SCLK_CM0S_PMU, "sclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 1, GFLAGS), - GATE(HCLK_CM0S_PMU, "hclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 2, GFLAGS), - GATE(DCLK_CM0S_PMU, "dclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 3, GFLAGS), + GATE(FCLK_CM0S_PMU, "fclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 0, GFLAGS), + GATE(SCLK_CM0S_PMU, "sclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 1, GFLAGS), + GATE(HCLK_CM0S_PMU, "hclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 2, GFLAGS), + GATE(DCLK_CM0S_PMU, "dclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 3, GFLAGS), GATE(HCLK_NOC_PMU, "hclk_noc_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 5, GFLAGS), }; diff --git a/drivers/clk/rockchip/clk-rk1108.c b/drivers/clk/rockchip/clk-rv1108.c index 92750d798e5d..7c05ab366348 100644 --- a/drivers/clk/rockchip/clk-rk1108.c +++ b/drivers/clk/rockchip/clk-rv1108.c @@ -18,16 +18,16 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/syscore_ops.h> -#include <dt-bindings/clock/rk1108-cru.h> +#include <dt-bindings/clock/rv1108-cru.h> #include "clk.h" -#define RK1108_GRF_SOC_STATUS0 0x480 +#define RV1108_GRF_SOC_STATUS0 0x480 -enum rk1108_plls { +enum rv1108_plls { apll, dpll, gpll, }; -static struct rockchip_pll_rate_table rk1108_pll_rates[] = { +static struct rockchip_pll_rate_table rv1108_pll_rates[] = { /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ 
RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), @@ -74,32 +74,32 @@ static struct rockchip_pll_rate_table rk1108_pll_rates[] = { { /* sentinel */ }, }; -#define RK1108_DIV_CORE_MASK 0xf -#define RK1108_DIV_CORE_SHIFT 4 +#define RV1108_DIV_CORE_MASK 0xf +#define RV1108_DIV_CORE_SHIFT 4 -#define RK1108_CLKSEL0(_core_peri_div) \ +#define RV1108_CLKSEL0(_core_peri_div) \ { \ - .reg = RK1108_CLKSEL_CON(1), \ - .val = HIWORD_UPDATE(_core_peri_div, RK1108_DIV_CORE_MASK,\ - RK1108_DIV_CORE_SHIFT) \ + .reg = RV1108_CLKSEL_CON(1), \ + .val = HIWORD_UPDATE(_core_peri_div, RV1108_DIV_CORE_MASK,\ + RV1108_DIV_CORE_SHIFT) \ } -#define RK1108_CPUCLK_RATE(_prate, _core_peri_div) \ +#define RV1108_CPUCLK_RATE(_prate, _core_peri_div) \ { \ .prate = _prate, \ .divs = { \ - RK1108_CLKSEL0(_core_peri_div), \ + RV1108_CLKSEL0(_core_peri_div), \ }, \ } -static struct rockchip_cpuclk_rate_table rk1108_cpuclk_rates[] __initdata = { - RK1108_CPUCLK_RATE(816000000, 4), - RK1108_CPUCLK_RATE(600000000, 4), - RK1108_CPUCLK_RATE(312000000, 4), +static struct rockchip_cpuclk_rate_table rv1108_cpuclk_rates[] __initdata = { + RV1108_CPUCLK_RATE(816000000, 4), + RV1108_CPUCLK_RATE(600000000, 4), + RV1108_CPUCLK_RATE(312000000, 4), }; -static const struct rockchip_cpuclk_reg_data rk1108_cpuclk_data = { - .core_reg = RK1108_CLKSEL_CON(0), +static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = { + .core_reg = RV1108_CLKSEL_CON(0), .div_core_shift = 0, .div_core_mask = 0x1f, .mux_core_alt = 1, @@ -131,13 +131,13 @@ PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" }; PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" }; PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; -static struct rockchip_pll_clock rk1108_pll_clks[] __initdata = { - [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RK1108_PLL_CON(0), - RK1108_PLL_CON(3), 8, 31, 0, rk1108_pll_rates), - [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK1108_PLL_CON(8), - RK1108_PLL_CON(11), 8, 31, 0, NULL), - [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK1108_PLL_CON(16), - RK1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk1108_pll_rates), +static struct rockchip_pll_clock rv1108_pll_clks[] __initdata = { + [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RV1108_PLL_CON(0), + RV1108_PLL_CON(3), 8, 31, 0, rv1108_pll_rates), + [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RV1108_PLL_CON(8), + RV1108_PLL_CON(11), 8, 31, 0, NULL), + [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RV1108_PLL_CON(16), + RV1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rv1108_pll_rates), }; #define MFLAGS CLK_MUX_HIWORD_MASK @@ -145,56 +145,56 @@ static struct rockchip_pll_clock rk1108_pll_clks[] __initdata = { #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) #define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK -static struct rockchip_clk_branch rk1108_uart0_fracmux __initdata = +static struct rockchip_clk_branch rv1108_uart0_fracmux __initdata = MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(13), 8, 2, MFLAGS); + RV1108_CLKSEL_CON(13), 8, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_uart1_fracmux __initdata = +static struct rockchip_clk_branch rv1108_uart1_fracmux __initdata = MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(14), 8, 2, MFLAGS); + RV1108_CLKSEL_CON(14), 8, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_uart2_fracmux __initdata = +static struct 
rockchip_clk_branch rv1108_uart2_fracmux __initdata = MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(15), 8, 2, MFLAGS); + RV1108_CLKSEL_CON(15), 8, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_i2s0_fracmux __initdata = +static struct rockchip_clk_branch rv1108_i2s0_fracmux __initdata = MUX(0, "i2s0_pre", mux_i2s0_pre_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(5), 12, 2, MFLAGS); + RV1108_CLKSEL_CON(5), 12, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_i2s1_fracmux __initdata = +static struct rockchip_clk_branch rv1108_i2s1_fracmux __initdata = MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(6), 12, 2, MFLAGS); + RV1108_CLKSEL_CON(6), 12, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_i2s2_fracmux __initdata = +static struct rockchip_clk_branch rv1108_i2s2_fracmux __initdata = MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(7), 12, 2, MFLAGS); + RV1108_CLKSEL_CON(7), 12, 2, MFLAGS); -static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = { +static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT, - RK1108_MISC_CON, 13, 2, MFLAGS), + RV1108_MISC_CON, 13, 2, MFLAGS), MUX(0, "usb480m", mux_usb480m_pre_p, CLK_SET_RATE_PARENT, - RK1108_MISC_CON, 15, 2, MFLAGS), + RV1108_MISC_CON, 15, 2, MFLAGS), /* * Clock-Architecture Diagram 2 */ /* PD_CORE */ GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 1, GFLAGS), + RV1108_CLKGATE_CON(0), 1, GFLAGS), GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 0, GFLAGS), + RV1108_CLKGATE_CON(0), 0, GFLAGS), GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 2, GFLAGS), + RV1108_CLKGATE_CON(0), 2, GFLAGS), COMPOSITE_NOMUX(0, "pclken_dbg", "armclk", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, - RK1108_CLKGATE_CON(0), 5, GFLAGS), + RV1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, + RV1108_CLKGATE_CON(0), 5, GFLAGS), COMPOSITE_NOMUX(ACLK_ENMCORE, "aclkenm_core", "armclk", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, - RK1108_CLKGATE_CON(0), 4, GFLAGS), + RV1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, + RV1108_CLKGATE_CON(0), 4, GFLAGS), GATE(ACLK_CORE, "aclk_core", "aclkenm_core", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(11), 0, GFLAGS), + RV1108_CLKGATE_CON(11), 0, GFLAGS), GATE(0, "pclk_dbg", "pclken_dbg", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(11), 1, GFLAGS), + RV1108_CLKGATE_CON(11), 1, GFLAGS), /* PD_RKVENC */ @@ -202,58 +202,58 @@ static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = { /* PD_PMU_wrapper */ COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(38), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(8), 12, GFLAGS), + RV1108_CLKSEL_CON(38), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 12, GFLAGS), GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 0, GFLAGS), + RV1108_CLKGATE_CON(10), 0, GFLAGS), GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 1, GFLAGS), + RV1108_CLKGATE_CON(10), 1, GFLAGS), GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 2, GFLAGS), + RV1108_CLKGATE_CON(10), 2, GFLAGS), GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 3, GFLAGS), + RV1108_CLKGATE_CON(10), 3, GFLAGS), GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED, - 
RK1108_CLKGATE_CON(10), 4, GFLAGS), + RV1108_CLKGATE_CON(10), 4, GFLAGS), GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 5, GFLAGS), + RV1108_CLKGATE_CON(10), 5, GFLAGS), GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 6, GFLAGS), + RV1108_CLKGATE_CON(10), 6, GFLAGS), COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(8), 15, GFLAGS), + RV1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(8), 15, GFLAGS), COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(8), 14, GFLAGS), + RV1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(8), 14, GFLAGS), GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(8), 13, GFLAGS), + RV1108_CLKGATE_CON(8), 13, GFLAGS), /* * Clock-Architecture Diagram 4 */ COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(6), 0, GFLAGS), + RV1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 0, GFLAGS), GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(17), 0, GFLAGS), + RV1108_CLKGATE_CON(17), 0, GFLAGS), COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0, - RK1108_CLKSEL_CON(29), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(7), 2, GFLAGS), + RV1108_CLKSEL_CON(29), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(7), 2, GFLAGS), COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0, - RK1108_CLKSEL_CON(29), 8, 5, DFLAGS, - RK1108_CLKGATE_CON(7), 3, GFLAGS), + RV1108_CLKSEL_CON(29), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(7), 3, GFLAGS), INVERTER(0, "pclk_vip", "ext_vip", - RK1108_CLKSEL_CON(31), 8, IFLAGS), + RV1108_CLKSEL_CON(31), 8, IFLAGS), GATE(0, "pclk_isp_pre", "pclk_vip", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(7), 6, GFLAGS), + RV1108_CLKGATE_CON(7), 6, GFLAGS), GATE(0, "pclk_isp", "pclk_isp_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(18), 10, GFLAGS), + RV1108_CLKGATE_CON(18), 10, GFLAGS), GATE(0, "dclk_hdmiphy_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(6), 5, GFLAGS), + RV1108_CLKGATE_CON(6), 5, GFLAGS), GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(6), 4, GFLAGS), + RV1108_CLKGATE_CON(6), 4, GFLAGS), COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0, - RK1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS), + RV1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS), /* * Clock-Architecture Diagram 5 @@ -262,153 +262,153 @@ static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = { FACTOR(0, "xin12m", "xin24m", 0, 1, 2), COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(2), 0, GFLAGS), + RV1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(2), 0, GFLAGS), COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(8), 0, - RK1108_CLKGATE_CON(2), 1, GFLAGS, - &rk1108_i2s0_fracmux), + RV1108_CLKSEL_CON(8), 0, + RV1108_CLKGATE_CON(2), 1, GFLAGS, + &rv1108_i2s0_fracmux), GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 2, GFLAGS), + RV1108_CLKGATE_CON(2), 2, GFLAGS), COMPOSITE_NODIV(0, "i2s_out", mux_i2s_out_p, 0, - RK1108_CLKSEL_CON(5), 15, 1, MFLAGS, - RK1108_CLKGATE_CON(2), 3, GFLAGS), + RV1108_CLKSEL_CON(5), 
15, 1, MFLAGS, + RV1108_CLKGATE_CON(2), 3, GFLAGS), COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(2), 4, GFLAGS), + RV1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(9), 0, RK2928_CLKGATE_CON(2), 5, GFLAGS, - &rk1108_i2s1_fracmux), + &rv1108_i2s1_fracmux), GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 6, GFLAGS), + RV1108_CLKGATE_CON(2), 6, GFLAGS), COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 8, GFLAGS), + RV1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 8, GFLAGS), COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(10), 0, - RK1108_CLKGATE_CON(2), 9, GFLAGS, - &rk1108_i2s2_fracmux), + RV1108_CLKSEL_CON(10), 0, + RV1108_CLKGATE_CON(2), 9, GFLAGS, + &rv1108_i2s2_fracmux), GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 10, GFLAGS), + RV1108_CLKGATE_CON(2), 10, GFLAGS), /* PD_BUS */ GATE(0, "aclk_bus_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 0, GFLAGS), + RV1108_CLKGATE_CON(1), 0, GFLAGS), GATE(0, "aclk_bus_src_apll", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 1, GFLAGS), + RV1108_CLKGATE_CON(1), 1, GFLAGS), GATE(0, "aclk_bus_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 2, GFLAGS), + RV1108_CLKGATE_CON(1), 2, GFLAGS), COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0, - RK1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS), + RV1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS), COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0, - RK1108_CLKSEL_CON(3), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(1), 4, GFLAGS), + RV1108_CLKSEL_CON(3), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(1), 4, GFLAGS), COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0, - RK1108_CLKSEL_CON(3), 8, 5, DFLAGS, - RK1108_CLKGATE_CON(1), 5, GFLAGS), + RV1108_CLKSEL_CON(3), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(1), 5, GFLAGS), GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 6, GFLAGS), + RV1108_CLKGATE_CON(1), 6, GFLAGS), GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 7, GFLAGS), + RV1108_CLKGATE_CON(1), 7, GFLAGS), GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 8, GFLAGS), + RV1108_CLKGATE_CON(1), 8, GFLAGS), GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 9, GFLAGS), + RV1108_CLKGATE_CON(1), 9, GFLAGS), GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 10, GFLAGS), + RV1108_CLKGATE_CON(1), 10, GFLAGS), GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 4, GFLAGS), + RV1108_CLKGATE_CON(13), 4, GFLAGS), COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 1, GFLAGS), + RV1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 1, GFLAGS), COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 3, GFLAGS), + RV1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 3, GFLAGS), COMPOSITE(0, "uart21_src", 
mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 5, GFLAGS), + RV1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 5, GFLAGS), COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(16), 0, - RK1108_CLKGATE_CON(3), 2, GFLAGS, - &rk1108_uart0_fracmux), + RV1108_CLKSEL_CON(16), 0, + RV1108_CLKGATE_CON(3), 2, GFLAGS, + &rv1108_uart0_fracmux), COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(17), 0, - RK1108_CLKGATE_CON(3), 4, GFLAGS, - &rk1108_uart1_fracmux), + RV1108_CLKSEL_CON(17), 0, + RV1108_CLKGATE_CON(3), 4, GFLAGS, + &rv1108_uart1_fracmux), COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(18), 0, - RK1108_CLKGATE_CON(3), 6, GFLAGS, - &rk1108_uart2_fracmux), + RV1108_CLKSEL_CON(18), 0, + RV1108_CLKGATE_CON(3), 6, GFLAGS, + &rv1108_uart2_fracmux), GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 10, GFLAGS), + RV1108_CLKGATE_CON(13), 10, GFLAGS), GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 11, GFLAGS), + RV1108_CLKGATE_CON(13), 11, GFLAGS), GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 12, GFLAGS), + RV1108_CLKGATE_CON(13), 12, GFLAGS), COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 7, GFLAGS), + RV1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 7, GFLAGS), COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 8, GFLAGS), + RV1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 8, GFLAGS), COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 9, GFLAGS), + RV1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 9, GFLAGS), GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 0, GFLAGS), + RV1108_CLKGATE_CON(13), 0, GFLAGS), GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 1, GFLAGS), + RV1108_CLKGATE_CON(13), 1, GFLAGS), GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 2, GFLAGS), + RV1108_CLKGATE_CON(13), 2, GFLAGS), COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 10, GFLAGS), + RV1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 10, GFLAGS), GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 6, GFLAGS), + RV1108_CLKGATE_CON(13), 6, GFLAGS), GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 3, GFLAGS), + RV1108_CLKGATE_CON(13), 3, GFLAGS), GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 7, GFLAGS), + RV1108_CLKGATE_CON(13), 7, GFLAGS), GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 8, GFLAGS), + RV1108_CLKGATE_CON(13), 8, GFLAGS), GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 9, GFLAGS), + RV1108_CLKGATE_CON(13), 9, GFLAGS), GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(14), 0, GFLAGS), + 
RV1108_CLKGATE_CON(14), 0, GFLAGS), GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0, - RK1108_CLKGATE_CON(12), 2, GFLAGS), + RV1108_CLKGATE_CON(12), 2, GFLAGS), GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 3, GFLAGS), + RV1108_CLKGATE_CON(12), 3, GFLAGS), GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 1, GFLAGS), + RV1108_CLKGATE_CON(12), 1, GFLAGS), /* PD_DDR */ GATE(0, "apll_ddr", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 8, GFLAGS), + RV1108_CLKGATE_CON(0), 8, GFLAGS), GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 9, GFLAGS), + RV1108_CLKGATE_CON(0), 9, GFLAGS), GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 10, GFLAGS), + RV1108_CLKGATE_CON(0), 10, GFLAGS), COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3, + RV1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, - RK1108_CLKGATE_CON(10), 9, GFLAGS), + RV1108_CLKGATE_CON(10), 9, GFLAGS), GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 4, GFLAGS), + RV1108_CLKGATE_CON(12), 4, GFLAGS), GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 5, GFLAGS), + RV1108_CLKGATE_CON(12), 5, GFLAGS), GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 6, GFLAGS), + RV1108_CLKGATE_CON(12), 6, GFLAGS), GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 11, GFLAGS), + RV1108_CLKGATE_CON(0), 11, GFLAGS), /* * Clock-Architecture Diagram 6 @@ -416,73 +416,73 @@ static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = { /* PD_PERI */ COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0, - RK1108_CLKSEL_CON(23), 10, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 5, GFLAGS), + RV1108_CLKSEL_CON(23), 10, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 5, GFLAGS), GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(15), 13, GFLAGS), + RV1108_CLKGATE_CON(15), 13, GFLAGS), COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0, - RK1108_CLKSEL_CON(23), 5, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 4, GFLAGS), + RV1108_CLKSEL_CON(23), 5, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 4, GFLAGS), GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(15), 12, GFLAGS), + RV1108_CLKGATE_CON(15), 12, GFLAGS), GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(4), 1, GFLAGS), + RV1108_CLKGATE_CON(4), 1, GFLAGS), GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(4), 2, GFLAGS), + RV1108_CLKGATE_CON(4), 2, GFLAGS), COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(15), 11, GFLAGS), + RV1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(15), 11, GFLAGS), COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS, - RK1108_CLKGATE_CON(5), 0, GFLAGS), + RV1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS, + RV1108_CLKGATE_CON(5), 0, GFLAGS), COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 10, 2, MFLAGS, - RK1108_CLKGATE_CON(5), 2, GFLAGS), + RV1108_CLKSEL_CON(25), 10, 2, MFLAGS, + RV1108_CLKGATE_CON(5), 2, GFLAGS), DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, - RK1108_CLKSEL_CON(26), 0, 8, DFLAGS), + RV1108_CLKSEL_CON(26), 0, 8, DFLAGS), COMPOSITE_NODIV(0, "sclk_emmc_src", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 
12, 2, MFLAGS, - RK1108_CLKGATE_CON(5), 1, GFLAGS), + RV1108_CLKSEL_CON(25), 12, 2, MFLAGS, + RV1108_CLKGATE_CON(5), 1, GFLAGS), DIV(SCLK_EMMC, "sclk_emmc", "sclk_emmc_src", 0, RK2928_CLKSEL_CON(26), 8, 8, DFLAGS), - GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 0, GFLAGS), - GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 1, GFLAGS), - GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 2, GFLAGS), + GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 0, GFLAGS), + GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 1, GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 2, GFLAGS), COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS, - RK1108_CLKGATE_CON(5), 3, GFLAGS), - GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 3, GFLAGS), + RV1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(5), 3, GFLAGS), + GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 3, GFLAGS), COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(5), 4, GFLAGS), - GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 10, GFLAGS), + RV1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(5), 4, GFLAGS), + GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 10, GFLAGS), COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0, - RK1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 10, GFLAGS), + RV1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 10, GFLAGS), MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(24), 8, 2, MFLAGS), - GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 8, GFLAGS), - GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 6, GFLAGS), - GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 7, GFLAGS), + RV1108_CLKSEL_CON(24), 8, 2, MFLAGS), + GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS), + GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS), + GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS), - MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK1108_SDMMC_CON0, 1), - MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK1108_SDMMC_CON1, 1), + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RV1108_SDMMC_CON0, 1), + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RV1108_SDMMC_CON1, 1), - MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK1108_SDIO_CON0, 1), - MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK1108_SDIO_CON1, 1), + MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RV1108_SDIO_CON0, 1), + MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RV1108_SDIO_CON1, 1), - MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK1108_EMMC_CON0, 1), - MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK1108_EMMC_CON1, 1), + MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RV1108_EMMC_CON0, 1), + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RV1108_EMMC_CON1, 1), }; -static const char *const rk1108_critical_clocks[] __initconst = { +static const char *const rv1108_critical_clocks[] __initconst = { "aclk_core", "aclk_bus_src_gpll", "aclk_periph", @@ -490,7 +490,7 @@ static const char *const rk1108_critical_clocks[] __initconst 
= { "pclk_periph", }; -static void __init rk1108_clk_init(struct device_node *np) +static void __init rv1108_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; @@ -508,24 +508,24 @@ static void __init rk1108_clk_init(struct device_node *np) return; } - rockchip_clk_register_plls(ctx, rk1108_pll_clks, - ARRAY_SIZE(rk1108_pll_clks), - RK1108_GRF_SOC_STATUS0); - rockchip_clk_register_branches(ctx, rk1108_clk_branches, - ARRAY_SIZE(rk1108_clk_branches)); - rockchip_clk_protect_critical(rk1108_critical_clocks, - ARRAY_SIZE(rk1108_critical_clocks)); + rockchip_clk_register_plls(ctx, rv1108_pll_clks, + ARRAY_SIZE(rv1108_pll_clks), + RV1108_GRF_SOC_STATUS0); + rockchip_clk_register_branches(ctx, rv1108_clk_branches, + ARRAY_SIZE(rv1108_clk_branches)); + rockchip_clk_protect_critical(rv1108_critical_clocks, + ARRAY_SIZE(rv1108_critical_clocks)); rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", mux_armclk_p, ARRAY_SIZE(mux_armclk_p), - &rk1108_cpuclk_data, rk1108_cpuclk_rates, - ARRAY_SIZE(rk1108_cpuclk_rates)); + &rv1108_cpuclk_data, rv1108_cpuclk_rates, + ARRAY_SIZE(rv1108_cpuclk_rates)); - rockchip_register_softrst(np, 13, reg_base + RK1108_SOFTRST_CON(0), + rockchip_register_softrst(np, 13, reg_base + RV1108_SOFTRST_CON(0), ROCKCHIP_SOFTRST_HIWORD_MASK); - rockchip_register_restart_notifier(ctx, RK1108_GLB_SRST_FST, NULL); + rockchip_register_restart_notifier(ctx, RV1108_GLB_SRST_FST, NULL); rockchip_clk_of_add_provider(np, ctx); } -CLK_OF_DECLARE(rk1108_cru, "rockchip,rk1108-cru", rk1108_clk_init); +CLK_OF_DECLARE(rv1108_cru, "rockchip,rv1108-cru", rv1108_clk_init); diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 7c15473ea72b..ef601dded32c 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -34,20 +34,20 @@ struct clk; #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) -/* register positions shared by RK1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */ -#define RK1108_PLL_CON(x) ((x) * 0x4) -#define RK1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60) -#define RK1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120) -#define RK1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180) -#define RK1108_GLB_SRST_FST 0x1c0 -#define RK1108_GLB_SRST_SND 0x1c4 -#define RK1108_MISC_CON 0x1cc -#define RK1108_SDMMC_CON0 0x1d8 -#define RK1108_SDMMC_CON1 0x1dc -#define RK1108_SDIO_CON0 0x1e0 -#define RK1108_SDIO_CON1 0x1e4 -#define RK1108_EMMC_CON0 0x1e8 -#define RK1108_EMMC_CON1 0x1ec +/* register positions shared by RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */ +#define RV1108_PLL_CON(x) ((x) * 0x4) +#define RV1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60) +#define RV1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120) +#define RV1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180) +#define RV1108_GLB_SRST_FST 0x1c0 +#define RV1108_GLB_SRST_SND 0x1c4 +#define RV1108_MISC_CON 0x1cc +#define RV1108_SDMMC_CON0 0x1d8 +#define RV1108_SDMMC_CON1 0x1dc +#define RV1108_SDIO_CON0 0x1e0 +#define RV1108_SDIO_CON1 0x1e4 +#define RV1108_EMMC_CON0 0x1e8 +#define RV1108_EMMC_CON1 0x1ec #define RK2928_PLL_CON(x) ((x) * 0x4) #define RK2928_MODE_CON 0x40 diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index 7c9383c3c2c6..f911d9f77763 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c @@ -313,7 +313,7 @@ void __init spear6xx_clk_init(void __iomem *misc_base) /* clock derived from apb clk */ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB, ADC_CLK_ENB, 0, &_lock); - 
clk_register_clkdev(clk, NULL, "adc"); + clk_register_clkdev(clk, NULL, "d820b000.adc"); clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "f0100000.gpio"); diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index a077ab6edffa..b0d551a8efe4 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -64,6 +64,7 @@ config SUN50I_A64_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default ARM64 && ARCH_SUNXI + depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST config SUN5I_CCU bool "Support for the Allwinner sun5i family CCM" @@ -75,6 +76,7 @@ config SUN5I_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN5I + depends on MACH_SUN5I || COMPILE_TEST config SUN6I_A31_CCU bool "Support for the Allwinner A31/A31s CCU" @@ -86,6 +88,7 @@ config SUN6I_A31_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN6I + depends on MACH_SUN6I || COMPILE_TEST config SUN8I_A23_CCU bool "Support for the Allwinner A23 CCU" @@ -98,6 +101,7 @@ config SUN8I_A23_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN8I + depends on MACH_SUN8I || COMPILE_TEST config SUN8I_A33_CCU bool "Support for the Allwinner A33 CCU" @@ -110,6 +114,7 @@ config SUN8I_A33_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN8I + depends on MACH_SUN8I || COMPILE_TEST config SUN8I_H3_CCU bool "Support for the Allwinner H3 CCU" @@ -120,7 +125,8 @@ config SUN8I_H3_CCU select SUNXI_CCU_NM select SUNXI_CCU_MP select SUNXI_CCU_PHASE - default MACH_SUN8I + default MACH_SUN8I || (ARM64 && ARCH_SUNXI) + depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST config SUN8I_V3S_CCU bool "Support for the Allwinner V3s CCU" @@ -132,6 +138,7 @@ config SUN8I_V3S_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN8I + depends on MACH_SUN8I || COMPILE_TEST config SUN9I_A80_CCU bool "Support for the Allwinner A80 CCU" @@ -143,5 +150,12 @@ config SUN9I_A80_CCU select SUNXI_CCU_MP select SUNXI_CCU_PHASE default MACH_SUN9I + depends on MACH_SUN9I || COMPILE_TEST + +config SUN8I_R_CCU + bool "Support for Allwinner SoCs' PRCM CCUs" + select SUNXI_CCU_DIV + select SUNXI_CCU_GATE + default MACH_SUN8I || (ARCH_SUNXI && ARM64) endif diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index 6feaac0c5600..0ec02fe14c50 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o +obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 06edaa523479..5c476f966a72 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -469,7 +469,7 @@ static const char * const csi_parents[] = { "hosc", "pll-video0", "pll-video1", static const u8 csi_table[] = { 0, 1, 2, 5, 6 }; static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_clk, "csi", csi_parents, csi_table, - 0x134, 0, 5, 24, 2, BIT(31), 0); + 0x134, 0, 5, 24, 3, BIT(31), 0); static SUNXI_CCU_GATE(ve_clk, "ve", "pll-ve", 0x13c, BIT(31), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 2c69b631967a..8d38e6510e29 100644 --- 
a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -159,13 +159,17 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de", BIT(28), /* lock */ CLK_SET_RATE_UNGATE); -/* TODO: Fix N */ -static SUNXI_CCU_N_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1", - "osc24M", 0x04c, - 8, 6, /* N */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static struct ccu_mult pll_ddr1_clk = { + .enable = BIT(31), + .lock = BIT(28), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 6, 0, 12, 0), + .common = { + .reg = 0x04c, + .hw.init = CLK_HW_INIT("pll-ddr1", "osc24M", + &ccu_mult_ops, + CLK_SET_RATE_UNGATE), + }, +}; static const char * const cpux_parents[] = { "osc32k", "osc24M", "pll-cpux" , "pll-cpux" }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index a26c8a19fe93..4cbc1b701b7c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -300,8 +300,10 @@ static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x06c, BIT(18), 0); static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x06c, BIT(19), 0); -static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2", +static SUNXI_CCU_GATE(bus_scr0_clk, "bus-scr0", "apb2", 0x06c, BIT(20), 0); +static SUNXI_CCU_GATE(bus_scr1_clk, "bus-scr1", "apb2", + 0x06c, BIT(21), 0); static SUNXI_CCU_GATE(bus_ephy_clk, "bus-ephy", "ahb1", 0x070, BIT(0), 0); @@ -546,7 +548,7 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = { &bus_uart1_clk.common, &bus_uart2_clk.common, &bus_uart3_clk.common, - &bus_scr_clk.common, + &bus_scr0_clk.common, &bus_ephy_clk.common, &bus_dbg_clk.common, &ths_clk.common, @@ -597,6 +599,114 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = { &gpu_clk.common, }; +static struct ccu_common *sun50i_h5_ccu_clks[] = { + &pll_cpux_clk.common, + &pll_audio_base_clk.common, + &pll_video_clk.common, + &pll_ve_clk.common, + &pll_ddr_clk.common, + &pll_periph0_clk.common, + &pll_gpu_clk.common, + &pll_periph1_clk.common, + &pll_de_clk.common, + &cpux_clk.common, + &axi_clk.common, + &ahb1_clk.common, + &apb1_clk.common, + &apb2_clk.common, + &ahb2_clk.common, + &bus_ce_clk.common, + &bus_dma_clk.common, + &bus_mmc0_clk.common, + &bus_mmc1_clk.common, + &bus_mmc2_clk.common, + &bus_nand_clk.common, + &bus_dram_clk.common, + &bus_emac_clk.common, + &bus_ts_clk.common, + &bus_hstimer_clk.common, + &bus_spi0_clk.common, + &bus_spi1_clk.common, + &bus_otg_clk.common, + &bus_ehci0_clk.common, + &bus_ehci1_clk.common, + &bus_ehci2_clk.common, + &bus_ehci3_clk.common, + &bus_ohci0_clk.common, + &bus_ohci1_clk.common, + &bus_ohci2_clk.common, + &bus_ohci3_clk.common, + &bus_ve_clk.common, + &bus_tcon0_clk.common, + &bus_tcon1_clk.common, + &bus_deinterlace_clk.common, + &bus_csi_clk.common, + &bus_tve_clk.common, + &bus_hdmi_clk.common, + &bus_de_clk.common, + &bus_gpu_clk.common, + &bus_msgbox_clk.common, + &bus_spinlock_clk.common, + &bus_codec_clk.common, + &bus_spdif_clk.common, + &bus_pio_clk.common, + &bus_ths_clk.common, + &bus_i2s0_clk.common, + &bus_i2s1_clk.common, + &bus_i2s2_clk.common, + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_i2c2_clk.common, + &bus_uart0_clk.common, + &bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_uart3_clk.common, + &bus_scr0_clk.common, + &bus_scr1_clk.common, + &bus_ephy_clk.common, + &bus_dbg_clk.common, + &ths_clk.common, + &nand_clk.common, + &mmc0_clk.common, + &mmc1_clk.common, + &mmc2_clk.common, + &ts_clk.common, + &ce_clk.common, + &spi0_clk.common, + &spi1_clk.common, + 
&i2s0_clk.common, + &i2s1_clk.common, + &i2s2_clk.common, + &spdif_clk.common, + &usb_phy0_clk.common, + &usb_phy1_clk.common, + &usb_phy2_clk.common, + &usb_phy3_clk.common, + &usb_ohci0_clk.common, + &usb_ohci1_clk.common, + &usb_ohci2_clk.common, + &usb_ohci3_clk.common, + &dram_clk.common, + &dram_ve_clk.common, + &dram_csi_clk.common, + &dram_deinterlace_clk.common, + &dram_ts_clk.common, + &de_clk.common, + &tcon_clk.common, + &tve_clk.common, + &deinterlace_clk.common, + &csi_misc_clk.common, + &csi_sclk_clk.common, + &csi_mclk_clk.common, + &ve_clk.common, + &ac_dig_clk.common, + &avs_clk.common, + &hdmi_clk.common, + &hdmi_ddc_clk.common, + &mbus_clk.common, + &gpu_clk.common, +}; + /* We hardcode the divider to 4 for now */ static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); @@ -677,7 +787,7 @@ static struct clk_hw_onecell_data sun8i_h3_hw_clks = { [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, - [CLK_BUS_SCR] = &bus_scr_clk.common.hw, + [CLK_BUS_SCR0] = &bus_scr0_clk.common.hw, [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw, [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, [CLK_THS] = &ths_clk.common.hw, @@ -727,7 +837,123 @@ static struct clk_hw_onecell_data sun8i_h3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_GPU] = &gpu_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_NUMBER_H3, +}; + +static struct clk_hw_onecell_data sun50i_h5_hw_clks = { + .hws = { + [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO] = &pll_video_clk.common.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_PLL_DE] = &pll_de_clk.common.hw, + [CLK_CPUX] = &cpux_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_APB2] = &apb2_clk.common.hw, + [CLK_AHB2] = &ahb2_clk.common.hw, + [CLK_BUS_CE] = &bus_ce_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, + [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, + [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, + [CLK_BUS_NAND] = &bus_nand_clk.common.hw, + [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, + [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, + [CLK_BUS_TS] = &bus_ts_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw, + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, + [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw, + [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw, + [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw, + [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, + [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw, + [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw, + [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, + [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw, + [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw, + [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw, + [CLK_BUS_CSI] = &bus_csi_clk.common.hw, + [CLK_BUS_TVE] = 
&bus_tve_clk.common.hw, + [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + [CLK_BUS_GPU] = &bus_gpu_clk.common.hw, + [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw, + [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw, + [CLK_BUS_CODEC] = &bus_codec_clk.common.hw, + [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_THS] = &bus_ths_clk.common.hw, + [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, + [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw, + [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw, + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, + [CLK_BUS_SCR0] = &bus_scr0_clk.common.hw, + [CLK_BUS_SCR1] = &bus_scr1_clk.common.hw, + [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw, + [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, + [CLK_THS] = &ths_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_CE] = &ce_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_I2S1] = &i2s1_clk.common.hw, + [CLK_I2S2] = &i2s2_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_USB_PHY2] = &usb_phy2_clk.common.hw, + [CLK_USB_PHY3] = &usb_phy3_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, + [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw, + [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw, + [CLK_DRAM] = &dram_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_TCON0] = &tcon_clk.common.hw, + [CLK_TVE] = &tve_clk.common.hw, + [CLK_DEINTERLACE] = &deinterlace_clk.common.hw, + [CLK_CSI_MISC] = &csi_misc_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, + [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_AC_DIG] = &ac_dig_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + }, + .num = CLK_NUMBER_H5, }; static struct ccu_reset_map sun8i_h3_ccu_resets[] = { @@ -790,7 +1016,71 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = { [RST_BUS_UART1] = { 0x2d8, BIT(17) }, [RST_BUS_UART2] = { 0x2d8, BIT(18) }, [RST_BUS_UART3] = { 0x2d8, BIT(19) }, - [RST_BUS_SCR] = { 0x2d8, BIT(20) }, + [RST_BUS_SCR0] = { 0x2d8, BIT(20) }, +}; + +static struct ccu_reset_map sun50i_h5_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + [RST_USB_PHY1] = { 0x0cc, BIT(1) }, + [RST_USB_PHY2] = { 0x0cc, BIT(2) }, + [RST_USB_PHY3] = { 0x0cc, BIT(3) }, + + [RST_MBUS] = { 0x0fc, BIT(31) }, + + [RST_BUS_CE] = { 0x2c0, BIT(5) }, + [RST_BUS_DMA] = { 0x2c0, BIT(6) }, + [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, + [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, + [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, + [RST_BUS_NAND] = { 0x2c0, BIT(13) }, + [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, + [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, + 
[RST_BUS_TS] = { 0x2c0, BIT(18) }, + [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, + [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, + [RST_BUS_SPI1] = { 0x2c0, BIT(21) }, + [RST_BUS_OTG] = { 0x2c0, BIT(23) }, + [RST_BUS_EHCI0] = { 0x2c0, BIT(24) }, + [RST_BUS_EHCI1] = { 0x2c0, BIT(25) }, + [RST_BUS_EHCI2] = { 0x2c0, BIT(26) }, + [RST_BUS_EHCI3] = { 0x2c0, BIT(27) }, + [RST_BUS_OHCI0] = { 0x2c0, BIT(28) }, + [RST_BUS_OHCI1] = { 0x2c0, BIT(29) }, + [RST_BUS_OHCI2] = { 0x2c0, BIT(30) }, + [RST_BUS_OHCI3] = { 0x2c0, BIT(31) }, + + [RST_BUS_VE] = { 0x2c4, BIT(0) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON1] = { 0x2c4, BIT(4) }, + [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) }, + [RST_BUS_CSI] = { 0x2c4, BIT(8) }, + [RST_BUS_TVE] = { 0x2c4, BIT(9) }, + [RST_BUS_HDMI0] = { 0x2c4, BIT(10) }, + [RST_BUS_HDMI1] = { 0x2c4, BIT(11) }, + [RST_BUS_DE] = { 0x2c4, BIT(12) }, + [RST_BUS_GPU] = { 0x2c4, BIT(20) }, + [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) }, + [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) }, + [RST_BUS_DBG] = { 0x2c4, BIT(31) }, + + [RST_BUS_EPHY] = { 0x2c8, BIT(2) }, + + [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, + [RST_BUS_SPDIF] = { 0x2d0, BIT(1) }, + [RST_BUS_THS] = { 0x2d0, BIT(8) }, + [RST_BUS_I2S0] = { 0x2d0, BIT(12) }, + [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, + [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, + + [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, + [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, + [RST_BUS_I2C2] = { 0x2d8, BIT(2) }, + [RST_BUS_UART0] = { 0x2d8, BIT(16) }, + [RST_BUS_UART1] = { 0x2d8, BIT(17) }, + [RST_BUS_UART2] = { 0x2d8, BIT(18) }, + [RST_BUS_UART3] = { 0x2d8, BIT(19) }, + [RST_BUS_SCR0] = { 0x2d8, BIT(20) }, + [RST_BUS_SCR1] = { 0x2d8, BIT(20) }, }; static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { @@ -803,6 +1093,16 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { .num_resets = ARRAY_SIZE(sun8i_h3_ccu_resets), }; +static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = { + .ccu_clks = sun50i_h5_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun50i_h5_ccu_clks), + + .hw_clks = &sun50i_h5_hw_clks, + + .resets = sun50i_h5_ccu_resets, + .num_resets = ARRAY_SIZE(sun50i_h5_ccu_resets), +}; + static struct ccu_mux_nb sun8i_h3_cpu_nb = { .common = &cpux_clk.common, .cm = &cpux_clk.mux, @@ -810,7 +1110,8 @@ static struct ccu_mux_nb sun8i_h3_cpu_nb = { .bypass_index = 1, /* index of 24 MHz oscillator */ }; -static void __init sun8i_h3_ccu_setup(struct device_node *node) +static void __init sunxi_h3_h5_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *desc) { void __iomem *reg; u32 val; @@ -827,10 +1128,22 @@ static void __init sun8i_h3_ccu_setup(struct device_node *node) val &= ~GENMASK(19, 16); writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG); - sunxi_ccu_probe(node, reg, &sun8i_h3_ccu_desc); + sunxi_ccu_probe(node, reg, desc); ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, &sun8i_h3_cpu_nb); } + +static void __init sun8i_h3_ccu_setup(struct device_node *node) +{ + sunxi_h3_h5_ccu_init(node, &sun8i_h3_ccu_desc); +} CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu", sun8i_h3_ccu_setup); + +static void __init sun50i_h5_ccu_setup(struct device_node *node) +{ + sunxi_h3_h5_ccu_init(node, &sun50i_h5_ccu_desc); +} +CLK_OF_DECLARE(sun50i_h5_ccu, "allwinner,sun50i-h5-ccu", + sun50i_h5_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h index 78be712c7487..85973d1e8165 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h @@ -57,6 +57,7 @@ /* And the GPU module clock is exported */ -#define CLK_NUMBER 
(CLK_GPU + 1) +#define CLK_NUMBER_H3 (CLK_GPU + 1) +#define CLK_NUMBER_H5 (CLK_BUS_SCR1 + 1) #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c new file mode 100644 index 000000000000..119f47b568ea --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_nm.h" + +#include "ccu-sun8i-r.h" + +static const char * const ar100_parents[] = { "osc32k", "osc24M", + "pll-periph0", "iosc" }; + +static struct ccu_div ar100_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + + .mux = { + .shift = 16, + .width = 2, + + .variable_prediv = { + .index = 2, + .shift = 8, + .width = 5, + }, + }, + + .common = { + .reg = 0x00, + .features = CCU_FEATURE_VARIABLE_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ar100", + ar100_parents, + &ccu_div_ops, + 0), + }, +}; + +static CLK_FIXED_FACTOR(ahb0_clk, "ahb0", "ar100", 1, 1, 0); + +static struct ccu_div apb0_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + + .common = { + .reg = 0x0c, + .hw.init = CLK_HW_INIT("apb0", + "ahb0", + &ccu_div_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0", + 0x28, BIT(0), 0); +static SUNXI_CCU_GATE(apb0_ir_clk, "apb0-ir", "apb0", + 0x28, BIT(1), 0); +static SUNXI_CCU_GATE(apb0_timer_clk, "apb0-timer", "apb0", + 0x28, BIT(2), 0); +static SUNXI_CCU_GATE(apb0_rsb_clk, "apb0-rsb", "apb0", + 0x28, BIT(3), 0); +static SUNXI_CCU_GATE(apb0_uart_clk, "apb0-uart", "apb0", + 0x28, BIT(4), 0); +static SUNXI_CCU_GATE(apb0_i2c_clk, "apb0-i2c", "apb0", + 0x28, BIT(6), 0); +static SUNXI_CCU_GATE(apb0_twd_clk, "apb0-twd", "apb0", + 0x28, BIT(7), 0); + +static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", + r_mod0_default_parents, 0x54, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static struct ccu_common *sun8i_h3_r_ccu_clks[] = { + &ar100_clk.common, + &apb0_clk.common, + &apb0_pio_clk.common, + &apb0_ir_clk.common, + &apb0_timer_clk.common, + &apb0_uart_clk.common, + &apb0_i2c_clk.common, + &apb0_twd_clk.common, + &ir_clk.common, +}; + +static struct ccu_common *sun50i_a64_r_ccu_clks[] = { + &ar100_clk.common, + &apb0_clk.common, + &apb0_pio_clk.common, + &apb0_ir_clk.common, + &apb0_timer_clk.common, + &apb0_rsb_clk.common, + &apb0_uart_clk.common, + &apb0_i2c_clk.common, + &apb0_twd_clk.common, + &ir_clk.common, +}; + +static struct clk_hw_onecell_data sun8i_h3_r_hw_clks = { + .hws = { + [CLK_AR100] = &ar100_clk.common.hw, + [CLK_AHB0] = &ahb0_clk.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, + [CLK_APB0_UART] = 
&apb0_uart_clk.common.hw, + [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, + [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = { + .hws = { + [CLK_AR100] = &ar100_clk.common.hw, + [CLK_AHB0] = &ahb0_clk.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, + [CLK_APB0_RSB] = &apb0_rsb_clk.common.hw, + [CLK_APB0_UART] = &apb0_uart_clk.common.hw, + [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, + [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = { + [RST_APB0_IR] = { 0xb0, BIT(1) }, + [RST_APB0_TIMER] = { 0xb0, BIT(2) }, + [RST_APB0_UART] = { 0xb0, BIT(4) }, + [RST_APB0_I2C] = { 0xb0, BIT(6) }, +}; + +static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = { + [RST_APB0_IR] = { 0xb0, BIT(1) }, + [RST_APB0_TIMER] = { 0xb0, BIT(2) }, + [RST_APB0_RSB] = { 0xb0, BIT(3) }, + [RST_APB0_UART] = { 0xb0, BIT(4) }, + [RST_APB0_I2C] = { 0xb0, BIT(6) }, +}; + +static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = { + .ccu_clks = sun8i_h3_r_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks), + + .hw_clks = &sun8i_h3_r_hw_clks, + + .resets = sun8i_h3_r_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_h3_r_ccu_resets), +}; + +static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = { + .ccu_clks = sun50i_a64_r_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks), + + .hw_clks = &sun50i_a64_r_hw_clks, + + .resets = sun50i_a64_r_ccu_resets, + .num_resets = ARRAY_SIZE(sun50i_a64_r_ccu_resets), +}; + +static void __init sunxi_r_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *desc) +{ + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + sunxi_ccu_probe(node, reg, desc); +} + +static void __init sun8i_h3_r_ccu_setup(struct device_node *node) +{ + sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc); +} +CLK_OF_DECLARE(sun8i_h3_r_ccu, "allwinner,sun8i-h3-r-ccu", + sun8i_h3_r_ccu_setup); + +static void __init sun50i_a64_r_ccu_setup(struct device_node *node) +{ + sunxi_r_ccu_init(node, &sun50i_a64_r_ccu_desc); +} +CLK_OF_DECLARE(sun50i_a64_r_ccu, "allwinner,sun50i-a64-r-ccu", + sun50i_a64_r_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.h b/drivers/clk/sunxi-ng/ccu-sun8i-r.h new file mode 100644 index 000000000000..a7a407f12b56 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.h @@ -0,0 +1,27 @@ +/* + * Copyright 2016 Icenowy <icenowy@aosc.xyz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CCU_SUN8I_R_H +#define _CCU_SUN8I_R_H_ + +#include <dt-bindings/clock/sun8i-r-ccu.h> +#include <dt-bindings/reset/sun8i-r-ccu.h> + +/* AHB/APB bus clocks are not exported */ +#define CLK_AHB0 1 +#define CLK_APB0 2 + +#define CLK_NUMBER (CLK_IR + 1) + +#endif /* _CCU_SUN8I_R_H */ diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c index e13e313ce4f5..8936ef87652c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c @@ -29,49 +29,48 @@ #define CCU_SUN9I_LOCK_REG 0x09c -static struct clk_div_table pll_cpux_p_div_table[] = { - { .val = 0, .div = 1 }, - { .val = 1, .div = 4 }, - { /* Sentinel */ }, -}; - /* - * The CPU PLLs are actually NP clocks, but P is /1 or /4, so here we - * use the NM clocks with a divider table for M. + * The CPU PLLs are actually NP clocks, with P being /1 or /4. However + * P should only be used for output frequencies lower than 288 MHz. + * Neither mainline Linux, U-boot, nor the vendor BSPs use these. + * + * For now we can just model it as a multiplier clock, and force P to /1. */ -static struct ccu_nm pll_c0cpux_clk = { +#define SUN9I_A80_PLL_C0CPUX_REG 0x000 +#define SUN9I_A80_PLL_C1CPUX_REG 0x004 + +static struct ccu_mult pll_c0cpux_clk = { .enable = BIT(31), .lock = BIT(0), - .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), - .m = _SUNXI_CCU_DIV_TABLE(16, 1, pll_cpux_p_div_table), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), .common = { - .reg = 0x000, + .reg = SUN9I_A80_PLL_C0CPUX_REG, .lock_reg = CCU_SUN9I_LOCK_REG, .features = CCU_FEATURE_LOCK_REG, .hw.init = CLK_HW_INIT("pll-c0cpux", "osc24M", - &ccu_nm_ops, CLK_SET_RATE_UNGATE), + &ccu_mult_ops, + CLK_SET_RATE_UNGATE), }, }; -static struct ccu_nm pll_c1cpux_clk = { +static struct ccu_mult pll_c1cpux_clk = { .enable = BIT(31), .lock = BIT(1), - .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), - .m = _SUNXI_CCU_DIV_TABLE(16, 1, pll_cpux_p_div_table), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), .common = { - .reg = 0x004, + .reg = SUN9I_A80_PLL_C1CPUX_REG, .lock_reg = CCU_SUN9I_LOCK_REG, .features = CCU_FEATURE_LOCK_REG, .hw.init = CLK_HW_INIT("pll-c1cpux", "osc24M", - &ccu_nm_ops, CLK_SET_RATE_UNGATE), + &ccu_mult_ops, + CLK_SET_RATE_UNGATE), }, }; /* * The Audio PLL has d1, d2 dividers in addition to the usual N, M * factors. Since we only need 2 frequencies from this PLL: 22.5792 MHz - * and 24.576 MHz, ignore them for now. Enforce the default for them, - * which is d1 = 0, d2 = 1. + * and 24.576 MHz, ignore them for now. Enforce d1 = 0 and d2 = 0. */ #define SUN9I_A80_PLL_AUDIO_REG 0x008 @@ -1189,6 +1188,36 @@ static const struct sunxi_ccu_desc sun9i_a80_ccu_desc = { .num_resets = ARRAY_SIZE(sun9i_a80_ccu_resets), }; +#define SUN9I_A80_PLL_P_SHIFT 16 +#define SUN9I_A80_PLL_N_SHIFT 8 +#define SUN9I_A80_PLL_N_WIDTH 8 + +static void sun9i_a80_cpu_pll_fixup(void __iomem *reg) +{ + u32 val = readl(reg); + + /* bail out if P divider is not used */ + if (!(val & BIT(SUN9I_A80_PLL_P_SHIFT))) + return; + + /* + * If P is used, output should be less than 288 MHz. When we + * set P to 1, we should also decrease the multiplier so the + * output doesn't go out of range, but not too much such that + * the multiplier stays above 12, the minimal operation value. + * + * To keep it simple, set the multiplier to 17, the reset value.
+ */ + val &= ~GENMASK(SUN9I_A80_PLL_N_SHIFT + SUN9I_A80_PLL_N_WIDTH - 1, + SUN9I_A80_PLL_N_SHIFT); + val |= 17 << SUN9I_A80_PLL_N_SHIFT; + + /* And clear P */ + val &= ~BIT(SUN9I_A80_PLL_P_SHIFT); + + writel(val, reg); +} + static int sun9i_a80_ccu_probe(struct platform_device *pdev) { struct resource *res; @@ -1205,6 +1234,10 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev) val &= (BIT(16) & BIT(18)); writel(val, reg + SUN9I_A80_PLL_AUDIO_REG); + /* Enforce P = 1 for both CPU cluster PLLs */ + sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C0CPUX_REG); + sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C1CPUX_REG); + return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun9i_a80_ccu_desc); } diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index 9d8724715a43..40aac316128f 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -112,8 +112,8 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, ret = clk_hw_register(NULL, hw); if (ret) { - pr_err("Couldn't register clock %s\n", - clk_hw_get_name(hw)); + pr_err("Couldn't register clock %d - %s\n", + i, clk_hw_get_name(hw)); goto err_clk_unreg; } } diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c index 8a81f9d4a89f..cd069d5da215 100644 --- a/drivers/clk/sunxi-ng/ccu_gate.c +++ b/drivers/clk/sunxi-ng/ccu_gate.c @@ -75,8 +75,55 @@ static int ccu_gate_is_enabled(struct clk_hw *hw) return ccu_gate_helper_is_enabled(&cg->common, cg->enable); } +static unsigned long ccu_gate_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_gate *cg = hw_to_ccu_gate(hw); + unsigned long rate = parent_rate; + + if (cg->common.features & CCU_FEATURE_ALL_PREDIV) + rate /= cg->common.prediv; + + return rate; +} + +static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct ccu_gate *cg = hw_to_ccu_gate(hw); + int div = 1; + + if (cg->common.features & CCU_FEATURE_ALL_PREDIV) + div = cg->common.prediv; + + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { + unsigned long best_parent = rate; + + if (cg->common.features & CCU_FEATURE_ALL_PREDIV) + best_parent *= div; + *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); + } + + return *prate / div; +} + +static int ccu_gate_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + /* + * We must report success but we can do so unconditionally because + * clk_factor_round_rate returns values that ensure this call is a + * nop. 
+ */ + + return 0; +} + const struct clk_ops ccu_gate_ops = { .disable = ccu_gate_disable, .enable = ccu_gate_enable, .is_enabled = ccu_gate_is_enabled, + .round_rate = ccu_gate_round_rate, + .set_rate = ccu_gate_set_rate, + .recalc_rate = ccu_gate_recalc_rate, }; diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c index 8724c01171b1..671141359895 100644 --- a/drivers/clk/sunxi-ng/ccu_mult.c +++ b/drivers/clk/sunxi-ng/ccu_mult.c @@ -137,6 +137,8 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate, spin_unlock_irqrestore(cm->common.lock, flags); + ccu_helper_wait_for_lock(&cm->common, cm->lock); + return 0; } diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h index 524acddfcb2e..f9c37b987d72 100644 --- a/drivers/clk/sunxi-ng/ccu_mult.h +++ b/drivers/clk/sunxi-ng/ccu_mult.h @@ -33,6 +33,7 @@ struct ccu_mult_internal { struct ccu_mult { u32 enable; + u32 lock; struct ccu_frac_internal frac; struct ccu_mult_internal mult; @@ -45,6 +46,7 @@ struct ccu_mult { _flags) \ struct ccu_mult _struct = { \ .enable = _gate, \ + .lock = _lock, \ .mult = _SUNXI_CCU_MULT(_mshift, _mwidth), \ .common = { \ .reg = _reg, \ diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c index b9e9b8a9d1b4..2485bda87a9a 100644 --- a/drivers/clk/sunxi-ng/ccu_nk.c +++ b/drivers/clk/sunxi-ng/ccu_nk.c @@ -102,9 +102,9 @@ static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate, if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) rate *= nk->fixed_post_div; - _nk.min_n = nk->n.min; + _nk.min_n = nk->n.min ?: 1; _nk.max_n = nk->n.max ?: 1 << nk->n.width; - _nk.min_k = nk->k.min; + _nk.min_k = nk->k.min ?: 1; _nk.max_k = nk->k.max ?: 1 << nk->k.width; ccu_nk_find_best(*parent_rate, rate, &_nk); @@ -127,9 +127,9 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate, if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) rate = rate * nk->fixed_post_div; - _nk.min_n = nk->n.min; + _nk.min_n = nk->n.min ?: 1; _nk.max_n = nk->n.max ?: 1 << nk->n.width; - _nk.min_k = nk->k.min; + _nk.min_k = nk->k.min ?: 1; _nk.max_k = nk->k.max ?: 1 << nk->k.width; ccu_nk_find_best(parent_rate, rate, &_nk); diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c index 71f81e95a061..cba84afe1cf1 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.c +++ b/drivers/clk/sunxi-ng/ccu_nkm.c @@ -109,9 +109,9 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux, struct ccu_nkm *nkm = data; struct _ccu_nkm _nkm; - _nkm.min_n = nkm->n.min; + _nkm.min_n = nkm->n.min ?: 1; _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; - _nkm.min_k = nkm->k.min; + _nkm.min_k = nkm->k.min ?: 1; _nkm.max_k = nkm->k.max ?: 1 << nkm->k.width; _nkm.min_m = 1; _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width; @@ -138,9 +138,9 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; - _nkm.min_n = nkm->n.min; + _nkm.min_n = nkm->n.min ?: 1; _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; - _nkm.min_k = nkm->k.min; + _nkm.min_k = nkm->k.min ?: 1; _nkm.max_k = nkm->k.max ?: 1 << nkm->k.width; _nkm.min_m = 1; _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width; diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index 488055ed944f..e58c95787f94 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -116,9 +116,9 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); struct _ccu_nkmp _nkmp; - 
_nkmp.min_n = nkmp->n.min; + _nkmp.min_n = nkmp->n.min ?: 1; _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; - _nkmp.min_k = nkmp->k.min; + _nkmp.min_k = nkmp->k.min ?: 1; _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width; _nkmp.min_m = 1; _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width; @@ -138,9 +138,9 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; - _nkmp.min_n = 1; + _nkmp.min_n = nkmp->n.min ?: 1; _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; - _nkmp.min_k = 1; + _nkmp.min_k = nkmp->k.min ?: 1; _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width; _nkmp.min_m = 1; _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width; diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index af71b1909cd9..5e5e90a4a50c 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -99,7 +99,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, struct ccu_nm *nm = hw_to_ccu_nm(hw); struct _ccu_nm _nm; - _nm.min_n = nm->n.min; + _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; _nm.max_m = nm->m.max ?: 1 << nm->m.width; @@ -122,7 +122,7 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate, else ccu_frac_helper_disable(&nm->common, &nm->frac); - _nm.min_n = 1; + _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; _nm.max_m = nm->m.max ?: 1 << nm->m.width; diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 5738635c5274..689f344377a7 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -307,6 +307,23 @@ enum clk_id { tegra_clk_xusb_ssp_src, tegra_clk_sclk_mux, tegra_clk_sor_safe, + tegra_clk_cec, + tegra_clk_ispa, + tegra_clk_dmic1, + tegra_clk_dmic2, + tegra_clk_dmic3, + tegra_clk_dmic1_sync_clk, + tegra_clk_dmic2_sync_clk, + tegra_clk_dmic3_sync_clk, + tegra_clk_dmic1_sync_clk_mux, + tegra_clk_dmic2_sync_clk_mux, + tegra_clk_dmic3_sync_clk_mux, + tegra_clk_iqc1, + tegra_clk_iqc2, + tegra_clk_pll_a_out_adsp, + tegra_clk_pll_a_out0_out_adsp, + tegra_clk_adsp, + tegra_clk_adsp_neon, tegra_clk_max, }; diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c index 88127828befe..303ef32ee3f1 100644 --- a/drivers/clk/tegra/clk-periph-gate.c +++ b/drivers/clk/tegra/clk-periph-gate.c @@ -159,6 +159,9 @@ struct clk *tegra_clk_register_periph_gate(const char *name, gate->enable_refcnt = enable_refcnt; gate->regs = pregs; + if (read_enb(gate) & periph_clk_to_bit(gate)) + enable_refcnt[clk_num]++; + /* Data in .init is copied by clk_register(), so stack variable OK */ gate->hw.init = &init; diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c index a17ca6d7f649..cf80831de79d 100644 --- a/drivers/clk/tegra/clk-periph.c +++ b/drivers/clk/tegra/clk-periph.c @@ -138,7 +138,7 @@ static const struct clk_ops tegra_clk_periph_no_gate_ops = { }; static struct clk *_tegra_clk_register_periph(const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct tegra_clk_periph *periph, void __iomem *clk_base, u32 offset, unsigned long flags) @@ -186,7 +186,7 @@ static struct clk *_tegra_clk_register_periph(const char *name, } struct clk *tegra_clk_register_periph(const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct tegra_clk_periph *periph, void __iomem *clk_base, u32 offset, unsigned long flags) { @@ -195,7 +195,7 @@ 
struct clk *tegra_clk_register_periph(const char *name, } struct clk *tegra_clk_register_periph_nodiv(const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct tegra_clk_periph *periph, void __iomem *clk_base, u32 offset) { diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index b3855360d6bc..159a854779e6 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -2517,152 +2517,6 @@ static int clk_plle_tegra210_is_enabled(struct clk_hw *hw) return val & PLLE_BASE_ENABLE ? 1 : 0; } -static int clk_pllu_tegra210_enable(struct clk_hw *hw) -{ - struct tegra_clk_pll *pll = to_clk_pll(hw); - struct clk_hw *pll_ref = clk_hw_get_parent(hw); - struct clk_hw *osc = clk_hw_get_parent(pll_ref); - const struct utmi_clk_param *params = NULL; - unsigned long flags = 0, input_rate; - unsigned int i; - int ret = 0; - u32 value; - - if (!osc) { - pr_err("%s: failed to get OSC clock\n", __func__); - return -EINVAL; - } - - input_rate = clk_hw_get_rate(osc); - - if (pll->lock) - spin_lock_irqsave(pll->lock, flags); - - _clk_pll_enable(hw); - - ret = clk_pll_wait_for_lock(pll); - if (ret < 0) - goto out; - - for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { - if (input_rate == utmi_parameters[i].osc_frequency) { - params = &utmi_parameters[i]; - break; - } - } - - if (!params) { - pr_err("%s: unexpected input rate %lu Hz\n", __func__, - input_rate); - ret = -EINVAL; - goto out; - } - - value = pll_readl_base(pll); - value &= ~PLLU_BASE_OVERRIDE; - pll_writel_base(value, pll); - - /* Put PLLU under HW control */ - value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0); - value |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE | - PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT | - PLLU_HW_PWRDN_CFG0_USE_LOCKDET; - value &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL | - PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL); - writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0); - - value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0); - value &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY; - writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0); - - udelay(1); - - value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0); - value |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0); - - udelay(1); - - /* Disable PLLU clock branch to UTMIPLL since it uses OSC */ - value = pll_readl_base(pll); - value &= ~PLLU_BASE_CLKENABLE_USB; - pll_writel_base(value, pll); - - value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - if (value & UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE) { - pr_debug("UTMIPLL already enabled\n"); - goto out; - } - - value &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; - writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - - /* Program UTMIP PLL stable and active counts */ - value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); - value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); - value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count); - value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count); - value |= UTMIP_PLL_CFG2_PHY_XTAL_CLOCKEN; - writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL delay and oscillator frequency counts */ - value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); - value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); - value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count); - value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); - value |= 
UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count); - writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); - - /* Remove power downs from UTMIP PLL control bits */ - value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); - value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - value |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - writel(value, pll->clk_base + UTMIP_PLL_CFG1); - - udelay(1); - - /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */ - value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); - value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP; - value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP; - value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP; - value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; - value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; - value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN; - writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); - - /* Setup HW control of UTMIPLL */ - value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); - value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); - - value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - value |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; - value &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; - writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - - udelay(1); - - value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0); - value &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY; - writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0); - - udelay(1); - - /* Enable HW control of UTMIPLL */ - value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); - -out: - if (pll->lock) - spin_unlock_irqrestore(pll->lock, flags); - - return ret; -} - static const struct clk_ops tegra_clk_plle_tegra210_ops = { .is_enabled = clk_plle_tegra210_is_enabled, .enable = clk_plle_tegra210_enable, @@ -2670,13 +2524,6 @@ static const struct clk_ops tegra_clk_plle_tegra210_ops = { .recalc_rate = clk_pll_recalc_rate, }; -static const struct clk_ops tegra_clk_pllu_tegra210_ops = { - .is_enabled = clk_pll_is_enabled, - .enable = clk_pllu_tegra210_enable, - .disable = clk_pll_disable, - .recalc_rate = clk_pllre_recalc_rate, -}; - struct clk *tegra_clk_register_plle_tegra210(const char *name, const char *parent_name, void __iomem *clk_base, unsigned long flags, @@ -2918,25 +2765,4 @@ struct clk *tegra_clk_register_pllmb(const char *name, const char *parent_name, return clk; } -struct clk *tegra_clk_register_pllu_tegra210(const char *name, - const char *parent_name, void __iomem *clk_base, - unsigned long flags, struct tegra_clk_pll_params *pll_params, - spinlock_t *lock) -{ - struct tegra_clk_pll *pll; - struct clk *clk; - - pll_params->flags |= TEGRA_PLLU; - - pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); - if (IS_ERR(pll)) - return ERR_CAST(pll); - - clk = _tegra_clk_register_pll(pll, name, parent_name, flags, - &tegra_clk_pllu_tegra210_ops); - if (IS_ERR(clk)) - kfree(pll); - - return clk; -} #endif diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c index 131d1b5085e2..84267cfc4433 100644 --- a/drivers/clk/tegra/clk-super.c +++ b/drivers/clk/tegra/clk-super.c @@ -121,9 +121,50 @@ out: return err; } +const struct clk_ops tegra_clk_super_mux_ops = { + .get_parent = clk_super_get_parent, + .set_parent = clk_super_set_parent, +}; + +static long clk_super_round_rate(struct clk_hw *hw, unsigned 
long rate, + unsigned long *parent_rate) +{ + struct tegra_clk_super_mux *super = to_clk_super_mux(hw); + struct clk_hw *div_hw = &super->frac_div.hw; + + __clk_hw_set_clk(div_hw, hw); + + return super->div_ops->round_rate(div_hw, rate, parent_rate); +} + +static unsigned long clk_super_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct tegra_clk_super_mux *super = to_clk_super_mux(hw); + struct clk_hw *div_hw = &super->frac_div.hw; + + __clk_hw_set_clk(div_hw, hw); + + return super->div_ops->recalc_rate(div_hw, parent_rate); +} + +static int clk_super_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct tegra_clk_super_mux *super = to_clk_super_mux(hw); + struct clk_hw *div_hw = &super->frac_div.hw; + + __clk_hw_set_clk(div_hw, hw); + + return super->div_ops->set_rate(div_hw, rate, parent_rate); +} + const struct clk_ops tegra_clk_super_ops = { .get_parent = clk_super_get_parent, .set_parent = clk_super_set_parent, + .set_rate = clk_super_set_rate, + .round_rate = clk_super_round_rate, + .recalc_rate = clk_super_recalc_rate, }; struct clk *tegra_clk_register_super_mux(const char *name, @@ -136,13 +177,11 @@ struct clk *tegra_clk_register_super_mux(const char *name, struct clk_init_data init; super = kzalloc(sizeof(*super), GFP_KERNEL); - if (!super) { - pr_err("%s: could not allocate super clk\n", __func__); + if (!super) return ERR_PTR(-ENOMEM); - } init.name = name; - init.ops = &tegra_clk_super_ops; + init.ops = &tegra_clk_super_mux_ops; init.flags = flags; init.parent_names = parent_names; init.num_parents = num_parents; @@ -163,3 +202,43 @@ struct clk *tegra_clk_register_super_mux(const char *name, return clk; } + +struct clk *tegra_clk_register_super_clk(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 clk_super_flags, + spinlock_t *lock) +{ + struct tegra_clk_super_mux *super; + struct clk *clk; + struct clk_init_data init; + + super = kzalloc(sizeof(*super), GFP_KERNEL); + if (!super) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &tegra_clk_super_ops; + init.flags = flags; + init.parent_names = parent_names; + init.num_parents = num_parents; + + super->reg = reg; + super->lock = lock; + super->width = 4; + super->flags = clk_super_flags; + super->frac_div.reg = reg + 4; + super->frac_div.shift = 16; + super->frac_div.width = 8; + super->frac_div.frac_width = 1; + super->frac_div.lock = lock; + super->div_ops = &tegra_clk_frac_div_ops; + + /* Data in .init is copied by clk_register(), so stack variable OK */ + super->hw.init = &init; + + clk = clk_register(NULL, &super->hw); + if (IS_ERR(clk)) + kfree(super); + + return clk; +} diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c index e2bfa9b368f6..b37cae7af26d 100644 --- a/drivers/clk/tegra/clk-tegra-audio.c +++ b/drivers/clk/tegra/clk-tegra-audio.c @@ -31,6 +31,9 @@ #define AUDIO_SYNC_CLK_I2S3 0x4ac #define AUDIO_SYNC_CLK_I2S4 0x4b0 #define AUDIO_SYNC_CLK_SPDIF 0x4b4 +#define AUDIO_SYNC_CLK_DMIC1 0x560 +#define AUDIO_SYNC_CLK_DMIC2 0x564 +#define AUDIO_SYNC_CLK_DMIC3 0x6b8 #define AUDIO_SYNC_DOUBLER 0x49c @@ -91,8 +94,14 @@ struct tegra_audio2x_clk_initdata { static DEFINE_SPINLOCK(clk_doubler_lock); -static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync", - "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync", +static const char * const mux_audio_sync_clk[] = { "spdif_in_sync", + "i2s0_sync", "i2s1_sync", "i2s2_sync", 
"i2s3_sync", "i2s4_sync", + "pll_a_out0", "vimclk_sync", +}; + +static const char * const mux_dmic_sync_clk[] = { "unused", "i2s0_sync", + "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "pll_a_out0", + "vimclk_sync", }; static struct tegra_sync_source_initdata sync_source_clks[] __initdata = { @@ -114,6 +123,12 @@ static struct tegra_audio_clk_initdata audio_clks[] = { AUDIO(spdif, AUDIO_SYNC_CLK_SPDIF), }; +static struct tegra_audio_clk_initdata dmic_clks[] = { + AUDIO(dmic1_sync_clk, AUDIO_SYNC_CLK_DMIC1), + AUDIO(dmic2_sync_clk, AUDIO_SYNC_CLK_DMIC2), + AUDIO(dmic3_sync_clk, AUDIO_SYNC_CLK_DMIC3), +}; + static struct tegra_audio2x_clk_initdata audio2x_clks[] = { AUDIO2X(audio0, 113, 24), AUDIO2X(audio1, 114, 25), @@ -123,6 +138,41 @@ static struct tegra_audio2x_clk_initdata audio2x_clks[] = { AUDIO2X(spdif, 118, 29), }; +static void __init tegra_audio_sync_clk_init(void __iomem *clk_base, + struct tegra_clk *tegra_clks, + struct tegra_audio_clk_initdata *sync, + int num_sync_clks, + const char * const *mux_names, + int num_mux_inputs) +{ + struct clk *clk; + struct clk **dt_clk; + struct tegra_audio_clk_initdata *data; + int i; + + for (i = 0, data = sync; i < num_sync_clks; i++, data++) { + dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks); + if (!dt_clk) + continue; + + clk = clk_register_mux(NULL, data->mux_name, mux_names, + num_mux_inputs, + CLK_SET_RATE_NO_REPARENT, + clk_base + data->offset, 0, 3, 0, + NULL); + *dt_clk = clk; + + dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks); + if (!dt_clk) + continue; + + clk = clk_register_gate(NULL, data->gate_name, data->mux_name, + 0, clk_base + data->offset, 4, + CLK_GATE_SET_TO_DISABLE, NULL); + *dt_clk = clk; + } +} + void __init tegra_audio_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_audio_clk_info *audio_info, @@ -176,30 +226,17 @@ void __init tegra_audio_clk_init(void __iomem *clk_base, *dt_clk = clk; } - for (i = 0; i < ARRAY_SIZE(audio_clks); i++) { - struct tegra_audio_clk_initdata *data; + tegra_audio_sync_clk_init(clk_base, tegra_clks, audio_clks, + ARRAY_SIZE(audio_clks), mux_audio_sync_clk, + ARRAY_SIZE(mux_audio_sync_clk)); - data = &audio_clks[i]; - dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks); + /* make sure the DMIC sync clocks have a valid parent */ + for (i = 0; i < ARRAY_SIZE(dmic_clks); i++) + writel_relaxed(1, clk_base + dmic_clks[i].offset); - if (!dt_clk) - continue; - clk = clk_register_mux(NULL, data->mux_name, mux_audio_sync_clk, - ARRAY_SIZE(mux_audio_sync_clk), - CLK_SET_RATE_NO_REPARENT, - clk_base + data->offset, 0, 3, 0, - NULL); - *dt_clk = clk; - - dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks); - if (!dt_clk) - continue; - - clk = clk_register_gate(NULL, data->gate_name, data->mux_name, - 0, clk_base + data->offset, 4, - CLK_GATE_SET_TO_DISABLE, NULL); - *dt_clk = clk; - } + tegra_audio_sync_clk_init(clk_base, tegra_clks, dmic_clks, + ARRAY_SIZE(dmic_clks), mux_dmic_sync_clk, + ARRAY_SIZE(mux_dmic_sync_clk)); for (i = 0; i < ARRAY_SIZE(audio2x_clks); i++) { struct tegra_audio2x_clk_initdata *data; diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 4ce4e7fb1124..294bfe40a4f5 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -138,6 +138,9 @@ #define CLK_SOURCE_TSECB 0x6d8 #define CLK_SOURCE_MAUD 0x6d4 #define CLK_SOURCE_USB2_HSIC_TRK 0x6cc +#define CLK_SOURCE_DMIC1 0x64c +#define CLK_SOURCE_DMIC2 0x650 +#define 
CLK_SOURCE_DMIC3 0x6bc #define MASK(x) (BIT(x) - 1) @@ -168,6 +171,12 @@ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\ _parents##_idx, 0, _lock) +#define MUX8_NOGATE(_name, _parents, _offset, _clk_id) \ + TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \ + 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\ + 0, TEGRA_PERIPH_NO_GATE, _clk_id,\ + _parents##_idx, 0, NULL) + #define INT(_name, _parents, _offset, \ _clk_num, _gate_flags, _clk_id) \ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\ @@ -619,6 +628,21 @@ static const char *mux_clkm_plldp_sor0lvds[] = { }; #define mux_clkm_plldp_sor0lvds_idx NULL +static const char * const mux_dmic1[] = { + "pll_a_out0", "dmic1_sync_clk", "pll_p", "clk_m" +}; +#define mux_dmic1_idx NULL + +static const char * const mux_dmic2[] = { + "pll_a_out0", "dmic2_sync_clk", "pll_p", "clk_m" +}; +#define mux_dmic2_idx NULL + +static const char * const mux_dmic3[] = { + "pll_a_out0", "dmic3_sync_clk", "pll_p", "clk_m" +}; +#define mux_dmic3_idx NULL + static struct tegra_periph_init_data periph_clks[] = { AUDIO("d_audio", CLK_SOURCE_D_AUDIO, 106, TEGRA_PERIPH_ON_APB, tegra_clk_d_audio), AUDIO("dam0", CLK_SOURCE_DAM0, 108, TEGRA_PERIPH_ON_APB, tegra_clk_dam0), @@ -739,7 +763,7 @@ static struct tegra_periph_init_data periph_clks[] = { MUX8("soc_therm", mux_clkm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm_8), MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 164, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8), MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8), - MUX8("isp", mux_pllc_pllp_plla1_pllc2_c3_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_9), + MUX8_NOGATE("isp", mux_pllc_pllp_plla1_pllc2_c3_clkm_pllc4, CLK_SOURCE_ISP, tegra_clk_isp_9), MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy), MUX8("entropy", mux_pllp_clkm_clk32_plle, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy_8), MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio), @@ -788,6 +812,9 @@ static struct tegra_periph_init_data periph_clks[] = { MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), MUX8("maud", mux_pllp_pllp_out3_clkm_clk32k_plla, CLK_SOURCE_MAUD, 202, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_maud), + MUX8("dmic1", mux_dmic1, CLK_SOURCE_DMIC1, 161, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_dmic1), + MUX8("dmic2", mux_dmic2, CLK_SOURCE_DMIC2, 162, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_dmic2), + MUX8("dmic3", mux_dmic3, CLK_SOURCE_DMIC3, 197, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_dmic3), }; static struct tegra_periph_init_data gate_clks[] = { @@ -809,7 +836,7 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("usb2", "clk_m", 58, 0, tegra_clk_usb2, 0), GATE("usb3", "clk_m", 59, 0, tegra_clk_usb3, 0), GATE("csi", "pll_p_out3", 52, 0, tegra_clk_csi, 0), - GATE("afi", "clk_m", 72, 0, tegra_clk_afi, 0), + GATE("afi", "mselect", 72, 0, tegra_clk_afi, 0), GATE("csus", "clk_m", 92, TEGRA_PERIPH_NO_RESET, tegra_clk_csus, 0), GATE("dds", "clk_m", 150, TEGRA_PERIPH_ON_APB, tegra_clk_dds, 0), GATE("dp2", "clk_m", 152, TEGRA_PERIPH_ON_APB, tegra_clk_dp2, 0), @@ -819,7 +846,8 @@ static struct tegra_periph_init_data gate_clks[] = { 
GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0), GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED), GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0), - GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0), + GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0), + GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0), GATE("vim2_clk", "clk_m", 11, 0, tegra_clk_vim2_clk, 0), GATE("pcie", "clk_m", 70, 0, tegra_clk_pcie, 0), GATE("gpu", "pll_ref", 184, 0, tegra_clk_gpu, 0), @@ -830,6 +858,13 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0), + GATE("cec", "pclk", 136, 0, tegra_clk_cec, 0), + GATE("iqc1", "clk_m", 221, 0, tegra_clk_iqc1, 0), + GATE("iqc2", "clk_m", 220, 0, tegra_clk_iqc1, 0), + GATE("pll_a_out_adsp", "pll_a", 188, 0, tegra_clk_pll_a_out_adsp, 0), + GATE("pll_a_out0_out_adsp", "pll_a", 188, 0, tegra_clk_pll_a_out0_out_adsp, 0), + GATE("adsp", "aclk", 199, 0, tegra_clk_adsp, 0), + GATE("adsp_neon", "aclk", 218, 0, tegra_clk_adsp_neon, 0), }; static struct tegra_periph_init_data div_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c index 91377abfefa1..a35579a3f884 100644 --- a/drivers/clk/tegra/clk-tegra-pmc.c +++ b/drivers/clk/tegra/clk-tegra-pmc.c @@ -95,7 +95,8 @@ void __init tegra_pmc_clk_init(void __iomem *pmc_base, continue; clk = clk_register_mux(NULL, data->mux_name, data->parents, - data->num_parents, CLK_SET_RATE_NO_REPARENT, + data->num_parents, + CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift, 3, 0, &clk_out_lock); *dt_clk = clk; @@ -106,7 +107,8 @@ void __init tegra_pmc_clk_init(void __iomem *pmc_base, continue; clk = clk_register_gate(NULL, data->gate_name, data->mux_name, - 0, pmc_base + PMC_CLK_OUT_CNTRL, + CLK_SET_RATE_PARENT, + pmc_base + PMC_CLK_OUT_CNTRL, data->gate_shift, 0, &clk_out_lock); *dt_clk = clk; clk_register_clkdev(clk, data->dev_name, data->gate_name); diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 933b5dd698b8..fd1a99c05c2d 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -819,6 +819,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true }, [tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA114_CLK_CEC, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index a112d3d2bff1..e81ea5b11577 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -928,6 +928,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA124_CLK_CEC, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 
2896d2e783ce..1024e853ea65 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/clk/tegra.h> #include <dt-bindings/clock/tegra210-car.h> +#include <dt-bindings/reset/tegra210-car.h> +#include <linux/iopoll.h> #include "clk.h" #include "clk-id.h" @@ -155,9 +157,35 @@ #define PMC_PLLM_WB0_OVERRIDE 0x1dc #define PMC_PLLM_WB0_OVERRIDE_2 0x2b0 +#define UTMIP_PLL_CFG2 0x488 +#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6) +#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP BIT(1) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP BIT(3) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP BIT(5) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN BIT(24) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP BIT(25) + +#define UTMIP_PLL_CFG1 0x484 +#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) +#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) +#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17) +#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) +#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15) +#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) +#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) + #define SATA_PLL_CFG0 0x490 #define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0) #define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2) +#define SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL BIT(4) +#define SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE BIT(5) +#define SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE BIT(6) +#define SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE BIT(7) + #define SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13) #define SATA_PLL_CFG0_SEQ_ENABLE BIT(24) @@ -196,6 +224,12 @@ #define CLK_M_DIVISOR_SHIFT 2 #define CLK_M_DIVISOR_MASK 0x3 +#define RST_DFLL_DVCO 0x2f4 +#define DVFS_DFLL_RESET_SHIFT 0 + +#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8 +#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac + /* * SDM fractional divisor is 16-bit 2's complement signed number within * (-2^12 ... 2^12-1) range. 
Represented in PLL data structure as unsigned @@ -454,6 +488,26 @@ void tegra210_sata_pll_hw_sequence_start(void) } EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_sequence_start); +void tegra210_set_sata_pll_seq_sw(bool state) +{ + u32 val; + + val = readl_relaxed(clk_base + SATA_PLL_CFG0); + if (state) { + val |= SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL; + val |= SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE; + val |= SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE; + val |= SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE; + } else { + val &= ~SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL; + val &= ~SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE; + val &= ~SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE; + val &= ~SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE; + } + writel_relaxed(val, clk_base + SATA_PLL_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw); + static inline void _pll_misc_chk_default(void __iomem *base, struct tegra_clk_pll_params *params, u8 misc_num, u32 default_val, u32 mask) @@ -501,12 +555,12 @@ static void tegra210_pllcx_set_defaults(const char *name, { pllcx->params->defaults_set = true; - if (readl_relaxed(clk_base + pllcx->params->base_reg) & - PLL_ENABLE) { + if (readl_relaxed(clk_base + pllcx->params->base_reg) & PLL_ENABLE) { /* PLL is ON: only check if defaults already set */ pllcx_check_defaults(pllcx->params); - pr_warn("%s already enabled. Postponing set full defaults\n", - name); + if (!pllcx->params->defaults_set) + pr_warn("%s already enabled. Postponing set full defaults\n", + name); return; } @@ -608,7 +662,6 @@ static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) if (readl_relaxed(clk_base + plld->params->base_reg) & PLL_ENABLE) { - pr_warn("PLL_D already enabled. Postponing set full defaults\n"); /* * PLL is ON: check if defaults already set, then set those @@ -625,6 +678,9 @@ static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) _pll_misc_chk_default(clk_base, plld->params, 0, val, ~mask & PLLD_MISC0_WRITE_MASK); + if (!plld->params->defaults_set) + pr_warn("PLL_D already enabled. Postponing set full defaults\n"); + /* Enable lock detect */ mask = PLLD_MISC0_LOCK_ENABLE | PLLD_MISC0_LOCK_OVERRIDE; val = readl_relaxed(clk_base + plld->params->ext_misc_reg[0]); @@ -896,7 +952,6 @@ static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) val |= step_b << PLLX_MISC2_DYNRAMP_STEPB_SHIFT; if (readl_relaxed(clk_base + pllx->params->base_reg) & PLL_ENABLE) { - pr_warn("PLL_X already enabled. Postponing set full defaults\n"); /* * PLL is ON: check if defaults already set, then set those @@ -904,6 +959,8 @@ static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) */ pllx_check_defaults(pllx); + if (!pllx->params->defaults_set) + pr_warn("PLL_X already enabled. Postponing set full defaults\n"); /* Configure dyn ramp, disable lock override */ writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]); @@ -948,7 +1005,6 @@ static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) pllmb->params->defaults_set = true; if (val & PLL_ENABLE) { - pr_warn("PLL_MB already enabled. Postponing set full defaults\n"); /* * PLL is ON: check if defaults already set, then set those @@ -959,6 +1015,8 @@ static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) _pll_misc_chk_default(clk_base, pllmb->params, 0, val, ~mask & PLLMB_MISC1_WRITE_MASK); + if (!pllmb->params->defaults_set) + pr_warn("PLL_MB already enabled. 
Postponing set full defaults\n"); /* Enable lock detect */ val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); val &= ~mask; @@ -1008,13 +1066,14 @@ static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) pllp->params->defaults_set = true; if (val & PLL_ENABLE) { - pr_warn("PLL_P already enabled. Postponing set full defaults\n"); /* * PLL is ON: check if defaults already set, then set those * that can be updated in flight. */ pllp_check_defaults(pllp, true); + if (!pllp->params->defaults_set) + pr_warn("PLL_P already enabled. Postponing set full defaults\n"); /* Enable lock detect */ val = readl_relaxed(clk_base + pllp->params->ext_misc_reg[0]); @@ -1046,47 +1105,49 @@ static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) * Both VCO and post-divider output rates are fixed at 480MHz and 240MHz, * respectively. */ -static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control) +static void pllu_check_defaults(struct tegra_clk_pll_params *params, + bool hw_control) { u32 val, mask; /* Ignore lock enable (will be set) and IDDQ if under h/w control */ val = PLLU_MISC0_DEFAULT_VALUE & (~PLLU_MISC0_IDDQ); mask = PLLU_MISC0_LOCK_ENABLE | (hw_control ? PLLU_MISC0_IDDQ : 0); - _pll_misc_chk_default(clk_base, pll->params, 0, val, + _pll_misc_chk_default(clk_base, params, 0, val, ~mask & PLLU_MISC0_WRITE_MASK); val = PLLU_MISC1_DEFAULT_VALUE; mask = PLLU_MISC1_LOCK_OVERRIDE; - _pll_misc_chk_default(clk_base, pll->params, 1, val, + _pll_misc_chk_default(clk_base, params, 1, val, ~mask & PLLU_MISC1_WRITE_MASK); } -static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) +static void tegra210_pllu_set_defaults(struct tegra_clk_pll_params *pllu) { - u32 val = readl_relaxed(clk_base + pllu->params->base_reg); + u32 val = readl_relaxed(clk_base + pllu->base_reg); - pllu->params->defaults_set = true; + pllu->defaults_set = true; if (val & PLL_ENABLE) { - pr_warn("PLL_U already enabled. Postponing set full defaults\n"); /* * PLL is ON: check if defaults already set, then set those * that can be updated in flight. */ pllu_check_defaults(pllu, false); + if (!pllu->defaults_set) + pr_warn("PLL_U already enabled. 
Postponing set full defaults\n"); /* Enable lock detect */ - val = readl_relaxed(clk_base + pllu->params->ext_misc_reg[0]); + val = readl_relaxed(clk_base + pllu->ext_misc_reg[0]); val &= ~PLLU_MISC0_LOCK_ENABLE; val |= PLLU_MISC0_DEFAULT_VALUE & PLLU_MISC0_LOCK_ENABLE; - writel_relaxed(val, clk_base + pllu->params->ext_misc_reg[0]); + writel_relaxed(val, clk_base + pllu->ext_misc_reg[0]); - val = readl_relaxed(clk_base + pllu->params->ext_misc_reg[1]); + val = readl_relaxed(clk_base + pllu->ext_misc_reg[1]); val &= ~PLLU_MISC1_LOCK_OVERRIDE; val |= PLLU_MISC1_DEFAULT_VALUE & PLLU_MISC1_LOCK_OVERRIDE; - writel_relaxed(val, clk_base + pllu->params->ext_misc_reg[1]); + writel_relaxed(val, clk_base + pllu->ext_misc_reg[1]); udelay(1); return; @@ -1094,9 +1155,9 @@ static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) /* set IDDQ, enable lock detect */ writel_relaxed(PLLU_MISC0_DEFAULT_VALUE, - clk_base + pllu->params->ext_misc_reg[0]); + clk_base + pllu->ext_misc_reg[0]); writel_relaxed(PLLU_MISC1_DEFAULT_VALUE, - clk_base + pllu->params->ext_misc_reg[1]); + clk_base + pllu->ext_misc_reg[1]); udelay(1); } @@ -1216,6 +1277,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw, cfg->n = p_rate / cf; cfg->sdm_data = 0; + cfg->output_rate = input_rate; if (params->sdm_ctrl_reg) { unsigned long rem = p_rate - cf * cfg->n; /* If ssc is enabled SDM enabled as well, even for integer n */ @@ -1226,10 +1288,15 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw, s -= PLL_SDM_COEFF / 2; cfg->sdm_data = sdin_din_to_data(s); } + cfg->output_rate *= cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 + + sdin_data_to_din(cfg->sdm_data); + cfg->output_rate /= p * cfg->m * PLL_SDM_COEFF; + } else { + cfg->output_rate *= cfg->n; + cfg->output_rate /= p * cfg->m; } cfg->input_rate = input_rate; - cfg->output_rate = rate; return 0; } @@ -1772,7 +1839,7 @@ static struct tegra_clk_pll_params pll_a1_params = { .misc_reg = PLLA1_MISC0, .lock_mask = PLLCX_BASE_LOCK, .lock_delay = 300, - .iddq_reg = PLLA1_MISC0, + .iddq_reg = PLLA1_MISC1, .iddq_bit_idx = PLLCX_IDDQ_BIT, .reset_reg = PLLA1_MISC0, .reset_bit_idx = PLLCX_RESET_BIT, @@ -1987,9 +2054,9 @@ static struct div_nmp pllu_nmp = { }; static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { - { 12000000, 480000000, 40, 1, 1, 0 }, - { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */ - { 38400000, 480000000, 25, 2, 1, 0 }, + { 12000000, 480000000, 40, 1, 0, 0 }, + { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */ + { 38400000, 480000000, 25, 2, 0, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -2013,8 +2080,47 @@ static struct tegra_clk_pll_params pll_u_vco_params = { .div_nmp = &pllu_nmp, .freq_table = pll_u_freq_table, .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, - .set_defaults = tegra210_pllu_set_defaults, - .calc_rate = tegra210_pll_fixed_mdiv_cfg, +}; + +struct utmi_clk_param { + /* Oscillator Frequency in KHz */ + u32 osc_frequency; + /* UTMIP PLL Enable Delay Count */ + u8 enable_delay_count; + /* UTMIP PLL Stable count */ + u16 stable_count; + /* UTMIP PLL Active delay count */ + u8 active_delay_count; + /* UTMIP PLL Xtal frequency count */ + u16 xtal_freq_count; +}; + +static const struct utmi_clk_param utmi_parameters[] = { + { + .osc_frequency = 38400000, .enable_delay_count = 0x0, + .stable_count = 0x0, .active_delay_count = 0x6, + .xtal_freq_count = 0x80 + }, { + .osc_frequency = 13000000, .enable_delay_count = 0x02, + .stable_count = 0x33, .active_delay_count = 0x05, + .xtal_freq_count = 0x7f + }, { + 
.osc_frequency = 19200000, .enable_delay_count = 0x03, + .stable_count = 0x4b, .active_delay_count = 0x06, + .xtal_freq_count = 0xbb + }, { + .osc_frequency = 12000000, .enable_delay_count = 0x02, + .stable_count = 0x2f, .active_delay_count = 0x08, + .xtal_freq_count = 0x76 + }, { + .osc_frequency = 26000000, .enable_delay_count = 0x04, + .stable_count = 0x66, .active_delay_count = 0x09, + .xtal_freq_count = 0xfe + }, { + .osc_frequency = 16800000, .enable_delay_count = 0x03, + .stable_count = 0x41, .active_delay_count = 0x0a, + .xtal_freq_count = 0xa4 + }, }; static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { @@ -2115,7 +2221,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_c2] = { .dt_id = TEGRA210_CLK_PLL_C2, .present = true }, [tegra_clk_pll_c3] = { .dt_id = TEGRA210_CLK_PLL_C3, .present = true }, [tegra_clk_pll_m] = { .dt_id = TEGRA210_CLK_PLL_M, .present = true }, - [tegra_clk_pll_m_out1] = { .dt_id = TEGRA210_CLK_PLL_M_OUT1, .present = true }, [tegra_clk_pll_p] = { .dt_id = TEGRA210_CLK_PLL_P, .present = true }, [tegra_clk_pll_p_out1] = { .dt_id = TEGRA210_CLK_PLL_P_OUT1, .present = true }, [tegra_clk_pll_p_out3] = { .dt_id = TEGRA210_CLK_PLL_P_OUT3, .present = true }, @@ -2209,6 +2314,25 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true }, + [tegra_clk_pll_a1] = { .dt_id = TEGRA210_CLK_PLL_A1, .present = true }, + [tegra_clk_ispa] = { .dt_id = TEGRA210_CLK_ISPA, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA210_CLK_CEC, .present = true }, + [tegra_clk_dmic1] = { .dt_id = TEGRA210_CLK_DMIC1, .present = true }, + [tegra_clk_dmic2] = { .dt_id = TEGRA210_CLK_DMIC2, .present = true }, + [tegra_clk_dmic3] = { .dt_id = TEGRA210_CLK_DMIC3, .present = true }, + [tegra_clk_dmic1_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC1_SYNC_CLK, .present = true }, + [tegra_clk_dmic2_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC2_SYNC_CLK, .present = true }, + [tegra_clk_dmic3_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC3_SYNC_CLK, .present = true }, + [tegra_clk_dmic1_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC1_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dmic2_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC2_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dmic3_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC3_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dp2] = { .dt_id = TEGRA210_CLK_DP2, .present = true }, + [tegra_clk_iqc1] = { .dt_id = TEGRA210_CLK_IQC1, .present = true }, + [tegra_clk_iqc2] = { .dt_id = TEGRA210_CLK_IQC2, .present = true }, + [tegra_clk_pll_a_out_adsp] = { .dt_id = TEGRA210_CLK_PLL_A_OUT_ADSP, .present = true }, + [tegra_clk_pll_a_out0_out_adsp] = { .dt_id = TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP, .present = true }, + [tegra_clk_adsp] = { .dt_id = TEGRA210_CLK_ADSP, .present = true }, + [tegra_clk_adsp_neon] = { .dt_id = TEGRA210_CLK_ADSP_NEON, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { @@ -2227,7 +2351,6 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "pll_p_out3", .dt_id = TEGRA210_CLK_PLL_P_OUT3 }, { .con_id = "pll_p_out4", .dt_id = TEGRA210_CLK_PLL_P_OUT4 }, { .con_id = "pll_m", .dt_id = TEGRA210_CLK_PLL_M }, - { .con_id = "pll_m_out1", .dt_id = TEGRA210_CLK_PLL_M_OUT1 }, { .con_id = "pll_x", .dt_id = TEGRA210_CLK_PLL_X }, { .con_id = 
"pll_x_out0", .dt_id = TEGRA210_CLK_PLL_X_OUT0 }, { .con_id = "pll_u", .dt_id = TEGRA210_CLK_PLL_U }, @@ -2286,6 +2409,221 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = { static struct clk **clks; +static const char * const aclk_parents[] = { + "pll_a1", "pll_c", "pll_p", "pll_a_out0", "pll_c2", "pll_c3", + "clk_m" +}; + +void tegra210_put_utmipll_in_iddq(void) +{ + u32 reg; + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + + if (reg & UTMIPLL_HW_PWRDN_CFG0_UTMIPLL_LOCK) { + pr_err("trying to assert IDDQ while UTMIPLL is locked\n"); + return; + } + + reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_put_utmipll_in_iddq); + +void tegra210_put_utmipll_out_iddq(void) +{ + u32 reg; + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_put_utmipll_out_iddq); + +static void tegra210_utmi_param_configure(void) +{ + u32 reg; + int i; + + for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { + if (osc_freq == utmi_parameters[i].osc_frequency) + break; + } + + if (i >= ARRAY_SIZE(utmi_parameters)) { + pr_err("%s: Unexpected oscillator freq %lu\n", __func__, + osc_freq); + return; + } + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); + + udelay(10); + + reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); + + /* Program UTMIP PLL stable and active counts */ + /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */ + reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); + reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count); + + reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); + + reg |= + UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].active_delay_count); + writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); + + /* Program UTMIP PLL delay and oscillator frequency counts */ + reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); + reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); + + reg |= + UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].enable_delay_count); + + reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); + reg |= + UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].xtal_freq_count); + + reg |= UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; + writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); + + /* Remove power downs from UTMIP PLL control bits */ + reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); + reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; + writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); + udelay(1); + + /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */ + reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); + reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP; + reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP; + reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP; + reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; + reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; + reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN; + writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); + + /* Setup HW control of UTMIPLL */ + reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); + reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; + writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; + reg &= 
~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); + + udelay(1); + + reg = readl_relaxed(clk_base + XUSB_PLL_CFG0); + reg &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY; + writel_relaxed(reg, clk_base + XUSB_PLL_CFG0); + + udelay(1); + + /* Enable HW control UTMIPLL */ + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); +} + +static int tegra210_enable_pllu(void) +{ + struct tegra_clk_pll_freq_table *fentry; + struct tegra_clk_pll pllu; + u32 reg; + + for (fentry = pll_u_freq_table; fentry->input_rate; fentry++) { + if (fentry->input_rate == pll_ref_freq) + break; + } + + if (!fentry->input_rate) { + pr_err("Unknown PLL_U reference frequency %lu\n", pll_ref_freq); + return -EINVAL; + } + + /* clear IDDQ bit */ + pllu.params = &pll_u_vco_params; + reg = readl_relaxed(clk_base + pllu.params->ext_misc_reg[0]); + reg &= ~BIT(pllu.params->iddq_bit_idx); + writel_relaxed(reg, clk_base + pllu.params->ext_misc_reg[0]); + + reg = readl_relaxed(clk_base + PLLU_BASE); + reg &= ~GENMASK(20, 0); + reg |= fentry->m; + reg |= fentry->n << 8; + reg |= fentry->p << 16; + writel(reg, clk_base + PLLU_BASE); + reg |= PLL_ENABLE; + writel(reg, clk_base + PLLU_BASE); + + readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg, + reg & PLL_BASE_LOCK, 2, 1000); + if (!(reg & PLL_BASE_LOCK)) { + pr_err("Timed out waiting for PLL_U to lock\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int tegra210_init_pllu(void) +{ + u32 reg; + int err; + + tegra210_pllu_set_defaults(&pll_u_vco_params); + /* skip initialization when pllu is in hw controlled mode */ + reg = readl_relaxed(clk_base + PLLU_BASE); + if (reg & PLLU_BASE_OVERRIDE) { + if (!(reg & PLL_ENABLE)) { + err = tegra210_enable_pllu(); + if (err < 0) { + WARN_ON(1); + return err; + } + } + /* enable hw controlled mode */ + reg = readl_relaxed(clk_base + PLLU_BASE); + reg &= ~PLLU_BASE_OVERRIDE; + writel(reg, clk_base + PLLU_BASE); + + reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0); + reg |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE | + PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT | + PLLU_HW_PWRDN_CFG0_USE_LOCKDET; + reg &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL | + PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL); + writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0); + + reg = readl_relaxed(clk_base + XUSB_PLL_CFG0); + reg &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY_MASK; + writel_relaxed(reg, clk_base + XUSB_PLL_CFG0); + udelay(1); + + reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0); + reg |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE; + writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0); + udelay(1); + + reg = readl_relaxed(clk_base + PLLU_BASE); + reg &= ~PLLU_BASE_CLKENABLE_USB; + writel_relaxed(reg, clk_base + PLLU_BASE); + } + + /* enable UTMIPLL hw control if not yet done by the bootloader */ + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + if (!(reg & UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE)) + tegra210_utmi_param_configure(); + + return 0; +} + static __init void tegra210_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { @@ -2347,6 +2685,11 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, clk_register_clkdev(clk, "cml1", NULL); clks[TEGRA210_CLK_CML1] = clk; + clk = tegra_clk_register_super_clk("aclk", aclk_parents, + ARRAY_SIZE(aclk_parents), 0, clk_base + 0x6e0, + 0, NULL); + clks[TEGRA210_CLK_ACLK] = clk; + tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params); } @@ -2402,9 +2745,6 @@ static 
void __init tegra210_pll_init(void __iomem *clk_base, clk_register_clkdev(clk, "pll_mb", NULL); clks[TEGRA210_CLK_PLL_MB] = clk; - clk_register_clkdev(clk, "pll_m_out1", NULL); - clks[TEGRA210_CLK_PLL_M_OUT1] = clk; - /* PLLM_UD */ clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m", CLK_SET_RATE_PARENT, 1, 1); @@ -2412,11 +2752,12 @@ static void __init tegra210_pll_init(void __iomem *clk_base, clks[TEGRA210_CLK_PLL_M_UD] = clk; /* PLLU_VCO */ - clk = tegra_clk_register_pllu_tegra210("pll_u_vco", "pll_ref", - clk_base, 0, &pll_u_vco_params, - &pll_u_lock); - clk_register_clkdev(clk, "pll_u_vco", NULL); - clks[TEGRA210_CLK_PLL_U] = clk; + if (!tegra210_init_pllu()) { + clk = clk_register_fixed_rate(NULL, "pll_u_vco", "pll_ref", 0, + 480*1000*1000); + clk_register_clkdev(clk, "pll_u_vco", NULL); + clks[TEGRA210_CLK_PLL_U] = clk; + } /* PLLU_OUT */ clk = clk_register_divider_table(NULL, "pll_u_out", "pll_u_vco", 0, @@ -2651,6 +2992,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 }, + /* TODO find a way to enable this on-demand */ + { TEGRA210_CLK_DBGAPB, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_TSENSOR, TEGRA210_CLK_CLK_M, 400000, 0 }, { TEGRA210_CLK_I2C1, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C2, TEGRA210_CLK_PLL_P, 0, 0 }, @@ -2661,6 +3004,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 }, { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 }, { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 }, + { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, + { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 }, /* This MUST be the last entry. */ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, }; @@ -2679,6 +3024,81 @@ static void __init tegra210_clock_apply_init_table(void) } /** + * tegra210_car_barrier - wait for pending writes to the CAR to complete + * + * Wait for any outstanding writes to the CAR MMIO space from this CPU + * to complete before continuing execution. No return value. + */ +static void tegra210_car_barrier(void) +{ + readl_relaxed(clk_base + RST_DFLL_DVCO); +} + +/** + * tegra210_clock_assert_dfll_dvco_reset - assert the DFLL's DVCO reset + * + * Assert the reset line of the DFLL's DVCO. No return value. + */ +static void tegra210_clock_assert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v |= (1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra210_car_barrier(); +} + +/** + * tegra210_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset + * + * Deassert the reset line of the DFLL's DVCO, allowing the DVCO to + * operate. No return value. 
+ */ +static void tegra210_clock_deassert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v &= ~(1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra210_car_barrier(); +} + +static int tegra210_reset_assert(unsigned long id) +{ + if (id == TEGRA210_RST_DFLL_DVCO) + tegra210_clock_assert_dfll_dvco_reset(); + else if (id == TEGRA210_RST_ADSP) + writel(GENMASK(26, 21) | BIT(7), + clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_SET); + else + return -EINVAL; + + return 0; +} + +static int tegra210_reset_deassert(unsigned long id) +{ + if (id == TEGRA210_RST_DFLL_DVCO) + tegra210_clock_deassert_dfll_dvco_reset(); + else if (id == TEGRA210_RST_ADSP) { + writel(BIT(21), clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_CLR); + /* + * Considering adsp cpu clock (min: 12.5MHZ, max: 1GHz) + * a delay of 5us ensures that it's at least + * 6 * adsp_cpu_cycle_period long. + */ + udelay(5); + writel(GENMASK(26, 22) | BIT(7), + clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_CLR); + } else + return -EINVAL; + + return 0; +} + +/** * tegra210_clock_init - Tegra210-specific clock initialization * @np: struct device_node * of the DT node for the SoC CAR IP block * @@ -2742,6 +3162,9 @@ static void __init tegra210_clock_init(struct device_node *np) tegra_super_clk_gen5_init(clk_base, pmc_base, tegra210_clks, &pll_x_params); + tegra_init_special_resets(2, tegra210_reset_assert, + tegra210_reset_deassert); + tegra_add_of_provider(np); tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 8e2db5ead8da..a2d163f759b4 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -817,6 +817,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_p_out4] = { .dt_id = TEGRA30_CLK_PLL_P_OUT4, .present = true }, [tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true }, [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true }, }; static const char *pll_e_parents[] = { "pll_ref", "pll_p" }; diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index b2cdd9a235f4..ba923f0d5953 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -17,6 +17,7 @@ #include <linux/clkdev.h> #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/delay.h> #include <linux/of.h> #include <linux/clk/tegra.h> #include <linux/reset-controller.h> @@ -182,6 +183,20 @@ static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev, return -EINVAL; } +static int tegra_clk_rst_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int err; + + err = tegra_clk_rst_assert(rcdev, id); + if (err) + return err; + + udelay(1); + + return tegra_clk_rst_deassert(rcdev, id); +} + const struct tegra_clk_periph_regs *get_reg_bank(int clkid) { int reg_bank = clkid / 32; @@ -274,6 +289,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl, static const struct reset_control_ops rst_ops = { .assert = tegra_clk_rst_assert, .deassert = tegra_clk_rst_deassert, + .reset = tegra_clk_rst_reset, }; static struct reset_controller_dev rst_ctlr = { diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 6ba82ecffd4d..945b07093afa 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -116,7 +116,7 @@ struct tegra_clk_pll_freq_table { unsigned long input_rate; unsigned long output_rate; u32 n; - u16 
m; + u32 m; u8 p; u8 cpcon; u16 sdm_data; @@ -586,11 +586,11 @@ struct tegra_clk_periph { extern const struct clk_ops tegra_clk_periph_ops; struct clk *tegra_clk_register_periph(const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct tegra_clk_periph *periph, void __iomem *clk_base, u32 offset, unsigned long flags); struct clk *tegra_clk_register_periph_nodiv(const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct tegra_clk_periph *periph, void __iomem *clk_base, u32 offset); @@ -626,7 +626,7 @@ struct tegra_periph_init_data { const char *name; int clk_id; union { - const char **parent_names; + const char *const *parent_names; const char *parent_name; } p; int num_parents; @@ -686,6 +686,8 @@ struct tegra_periph_init_data { struct tegra_clk_super_mux { struct clk_hw hw; void __iomem *reg; + struct tegra_clk_frac_div frac_div; + const struct clk_ops *div_ops; u8 width; u8 flags; u8 div2_index; @@ -702,7 +704,10 @@ struct clk *tegra_clk_register_super_mux(const char *name, const char **parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 clk_super_flags, u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock); - +struct clk *tegra_clk_register_super_clk(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 clk_super_flags, + spinlock_t *lock); /** * struct clk_init_table - clock initialization table * @clk_id: clock id as mentioned in device tree bindings diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 6411e132faa2..06f486b3488c 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -55,20 +55,20 @@ static int dra7_apll_enable(struct clk_hw *hw) state <<= __ffs(ad->idlest_mask); /* Check is already locked */ - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if ((v & ad->idlest_mask) == state) return r; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); state <<= __ffs(ad->idlest_mask); while (1) { - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if ((v & ad->idlest_mask) == state) break; if (i > MAX_APLL_WAIT_TRIES) @@ -99,10 +99,10 @@ static void dra7_apll_disable(struct clk_hw *hw) state <<= __ffs(ad->idlest_mask); - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } static int dra7_apll_is_enabled(struct clk_hw *hw) @@ -113,7 +113,7 @@ static int dra7_apll_is_enabled(struct clk_hw *hw) ad = clk->dpll_data; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ad->enable_mask; v >>= __ffs(ad->enable_mask); @@ -164,7 +164,7 @@ static void __init omap_clk_register_apll(struct clk_hw *hw, ad->clk_bypass = __clk_get_hw(clk); - clk = clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, node->name); if (!IS_ERR(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); kfree(clk_hw->hw.init->parent_names); @@ -185,6 +185,7 @@ static void __init 
of_dra7_apll_setup(struct device_node *node) struct clk_hw_omap *clk_hw = NULL; struct clk_init_data *init = NULL; const char **parent_names = NULL; + int ret; ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); @@ -194,7 +195,6 @@ static void __init of_dra7_apll_setup(struct device_node *node) clk_hw->dpll_data = ad; clk_hw->hw.init = init; - clk_hw->flags = MEMMAP_ADDRESSING; init->name = node->name; init->ops = &apll_ck_ops; @@ -213,10 +213,10 @@ static void __init of_dra7_apll_setup(struct device_node *node) init->parent_names = parent_names; - ad->control_reg = ti_clk_get_reg_addr(node, 0); - ad->idlest_reg = ti_clk_get_reg_addr(node, 1); + ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg); + ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg); - if (IS_ERR(ad->control_reg) || IS_ERR(ad->idlest_reg)) + if (ret) goto cleanup; ad->idlest_mask = 0x1; @@ -242,7 +242,7 @@ static int omap2_apll_is_enabled(struct clk_hw *hw) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ad->enable_mask; v >>= __ffs(ad->enable_mask); @@ -268,13 +268,13 @@ static int omap2_apll_enable(struct clk_hw *hw) u32 v; int i = 0; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); while (1) { - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if (v & ad->idlest_mask) break; if (i > MAX_APLL_WAIT_TRIES) @@ -298,10 +298,10 @@ static void omap2_apll_disable(struct clk_hw *hw) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } static struct clk_ops omap2_apll_ops = { @@ -316,10 +316,10 @@ static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg); v &= ~ad->autoidle_mask; v |= val << __ffs(ad->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } #define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3 @@ -348,6 +348,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) struct clk *clk; const char *parent_name; u32 val; + int ret; ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); @@ -393,12 +394,11 @@ static void __init of_omap2_apll_setup(struct device_node *node) ad->idlest_mask = 1 << val; - ad->control_reg = ti_clk_get_reg_addr(node, 0); - ad->autoidle_reg = ti_clk_get_reg_addr(node, 1); - ad->idlest_reg = ti_clk_get_reg_addr(node, 2); + ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg); + ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg); + ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg); - if (IS_ERR(ad->control_reg) || IS_ERR(ad->autoidle_reg) || - IS_ERR(ad->idlest_reg)) + if (ret) goto cleanup; clk = clk_register(NULL, &clk_hw->hw); diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c index 345af43465f0..7bb9afbe4058 100644 --- a/drivers/clk/ti/autoidle.c +++ 
b/drivers/clk/ti/autoidle.c @@ -25,7 +25,7 @@ #include "clock.h" struct clk_ti_autoidle { - void __iomem *reg; + struct clk_omap_reg reg; u8 shift; u8 flags; const char *name; @@ -73,28 +73,28 @@ static void _allow_autoidle(struct clk_ti_autoidle *clk) { u32 val; - val = ti_clk_ll_ops->clk_readl(clk->reg); + val = ti_clk_ll_ops->clk_readl(&clk->reg); if (clk->flags & AUTOIDLE_LOW) val &= ~(1 << clk->shift); else val |= (1 << clk->shift); - ti_clk_ll_ops->clk_writel(val, clk->reg); + ti_clk_ll_ops->clk_writel(val, &clk->reg); } static void _deny_autoidle(struct clk_ti_autoidle *clk) { u32 val; - val = ti_clk_ll_ops->clk_readl(clk->reg); + val = ti_clk_ll_ops->clk_readl(&clk->reg); if (clk->flags & AUTOIDLE_LOW) val |= (1 << clk->shift); else val &= ~(1 << clk->shift); - ti_clk_ll_ops->clk_writel(val, clk->reg); + ti_clk_ll_ops->clk_writel(val, &clk->reg); } /** @@ -140,6 +140,7 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) { u32 shift; struct clk_ti_autoidle *clk; + int ret; /* Check if this clock has autoidle support or not */ if (of_property_read_u32(node, "ti,autoidle-shift", &shift)) @@ -152,11 +153,10 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) clk->shift = shift; clk->name = node->name; - clk->reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(clk->reg)) { + ret = ti_clk_get_reg_addr(node, 0, &clk->reg); + if (ret) { kfree(clk); - return -EINVAL; + return ret; } if (of_property_read_bool(node, "ti,invert-autoidle-bit")) diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 11d8aa3ec186..b1251cae98b8 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -52,14 +52,13 @@ * @idlest_reg and @idlest_bit. No return value. */ static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } @@ -85,15 +84,15 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = { * default find_idlest code assumes that they are at the same * position.) No return value. */ -static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) +static void +omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; /* USBHOST_IDLE has same shift */ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; @@ -122,15 +121,15 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = { * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via * @idlest_reg and @idlest_bit. No return value. 
*/ -static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) +static void +omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) { - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } @@ -154,11 +153,11 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = { * bit. A value of 1 indicates that clock is enabled. */ static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - *idlest_reg = (__force void __iomem *)(clk->enable_reg); + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET; *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL; } @@ -178,10 +177,10 @@ static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, * avoid this issue, and remove the casts. No return value. */ static void am35xx_clk_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, + struct clk_omap_reg *other_reg, u8 *other_bit) { - *other_reg = (__force void __iomem *)(clk->enable_reg); + memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg)); if (clk->enable_bit & AM35XX_IPSS_ICK_MASK) *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET; else @@ -205,14 +204,14 @@ const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = { * and @idlest_bit. No return value. 
*/ static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = AM35XX_ST_IPSS_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 7a8b51b35f9f..1c8bb83003bf 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -34,196 +34,13 @@ #define OMAP4_DPLL_USB_DEFFREQ 960000000 static struct ti_dt_clk omap44xx_clks[] = { - DT_CLK(NULL, "extalt_clkin_ck", "extalt_clkin_ck"), - DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"), - DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"), - DT_CLK(NULL, "pad_slimbus_core_clks_ck", "pad_slimbus_core_clks_ck"), - DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"), - DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"), - DT_CLK(NULL, "slimbus_clk", "slimbus_clk"), - DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"), - DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"), - DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"), - DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"), - DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"), - DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"), - DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"), - DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"), - DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"), - DT_CLK(NULL, "tie_low_clock_ck", "tie_low_clock_ck"), - DT_CLK(NULL, "utmi_phy_clkout_ck", "utmi_phy_clkout_ck"), - DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"), - DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"), - DT_CLK(NULL, "xclk60motg_ck", "xclk60motg_ck"), - DT_CLK(NULL, "abe_dpll_bypass_clk_mux_ck", "abe_dpll_bypass_clk_mux_ck"), - DT_CLK(NULL, "abe_dpll_refclk_mux_ck", "abe_dpll_refclk_mux_ck"), - DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"), - DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"), - DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"), - DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"), - DT_CLK(NULL, "abe_clk", "abe_clk"), - DT_CLK(NULL, "aess_fclk", "aess_fclk"), - DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"), - DT_CLK(NULL, "core_hsd_byp_clk_mux_ck", "core_hsd_byp_clk_mux_ck"), - DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"), - DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"), - DT_CLK(NULL, "dpll_core_m6x2_ck", "dpll_core_m6x2_ck"), - DT_CLK(NULL, "dbgclk_mux_ck", "dbgclk_mux_ck"), - DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"), - DT_CLK(NULL, "ddrphy_ck", "ddrphy_ck"), - DT_CLK(NULL, "dpll_core_m5x2_ck", "dpll_core_m5x2_ck"), - DT_CLK(NULL, "div_core_ck", "div_core_ck"), - DT_CLK(NULL, "div_iva_hs_clk", "div_iva_hs_clk"), - DT_CLK(NULL, "div_mpu_hs_clk", "div_mpu_hs_clk"), - DT_CLK(NULL, "dpll_core_m4x2_ck", "dpll_core_m4x2_ck"), - DT_CLK(NULL, "dll_clk_div_ck", "dll_clk_div_ck"), - DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"), - DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"), - DT_CLK(NULL, "dpll_core_m7x2_ck", "dpll_core_m7x2_ck"), - DT_CLK(NULL, "iva_hsd_byp_clk_mux_ck", "iva_hsd_byp_clk_mux_ck"), - DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"), - DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"), - DT_CLK(NULL, "dpll_iva_m4x2_ck", "dpll_iva_m4x2_ck"), - DT_CLK(NULL, "dpll_iva_m5x2_ck", "dpll_iva_m5x2_ck"), - DT_CLK(NULL, "dpll_mpu_ck", 
"dpll_mpu_ck"), - DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"), - DT_CLK(NULL, "per_hs_clk_div_ck", "per_hs_clk_div_ck"), - DT_CLK(NULL, "per_hsd_byp_clk_mux_ck", "per_hsd_byp_clk_mux_ck"), - DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"), - DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"), - DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"), - DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"), - DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"), - DT_CLK(NULL, "dpll_per_m4x2_ck", "dpll_per_m4x2_ck"), - DT_CLK(NULL, "dpll_per_m5x2_ck", "dpll_per_m5x2_ck"), - DT_CLK(NULL, "dpll_per_m6x2_ck", "dpll_per_m6x2_ck"), - DT_CLK(NULL, "dpll_per_m7x2_ck", "dpll_per_m7x2_ck"), - DT_CLK(NULL, "usb_hs_clk_div_ck", "usb_hs_clk_div_ck"), - DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"), - DT_CLK(NULL, "dpll_usb_clkdcoldo_ck", "dpll_usb_clkdcoldo_ck"), - DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"), - DT_CLK(NULL, "ducati_clk_mux_ck", "ducati_clk_mux_ck"), - DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"), - DT_CLK(NULL, "func_24m_clk", "func_24m_clk"), - DT_CLK(NULL, "func_24mc_fclk", "func_24mc_fclk"), - DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"), - DT_CLK(NULL, "func_48mc_fclk", "func_48mc_fclk"), - DT_CLK(NULL, "func_64m_fclk", "func_64m_fclk"), - DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"), - DT_CLK(NULL, "init_60m_fclk", "init_60m_fclk"), - DT_CLK(NULL, "l3_div_ck", "l3_div_ck"), - DT_CLK(NULL, "l4_div_ck", "l4_div_ck"), - DT_CLK(NULL, "lp_clk_div_ck", "lp_clk_div_ck"), - DT_CLK(NULL, "l4_wkup_clk_mux_ck", "l4_wkup_clk_mux_ck"), DT_CLK("smp_twd", NULL, "mpu_periphclk"), - DT_CLK(NULL, "ocp_abe_iclk", "ocp_abe_iclk"), - DT_CLK(NULL, "per_abe_24m_fclk", "per_abe_24m_fclk"), - DT_CLK(NULL, "per_abe_nc_fclk", "per_abe_nc_fclk"), - DT_CLK(NULL, "syc_clk_div_ck", "syc_clk_div_ck"), - DT_CLK(NULL, "aes1_fck", "aes1_fck"), - DT_CLK(NULL, "aes2_fck", "aes2_fck"), - DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"), - DT_CLK(NULL, "func_dmic_abe_gfclk", "func_dmic_abe_gfclk"), - DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"), - DT_CLK(NULL, "dss_tv_clk", "dss_tv_clk"), - DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"), - DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"), - DT_CLK(NULL, "dss_fck", "dss_fck"), DT_CLK("omapdss_dss", "ick", "dss_fck"), - DT_CLK(NULL, "fdif_fck", "fdif_fck"), - DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"), - DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"), - DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"), - DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"), - DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"), - DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"), - DT_CLK(NULL, "sgx_clk_mux", "sgx_clk_mux"), - DT_CLK(NULL, "hsi_fck", "hsi_fck"), - DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"), - DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"), - DT_CLK(NULL, "func_mcasp_abe_gfclk", "func_mcasp_abe_gfclk"), - DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"), - DT_CLK(NULL, "func_mcbsp1_gfclk", "func_mcbsp1_gfclk"), - DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"), - DT_CLK(NULL, "func_mcbsp2_gfclk", "func_mcbsp2_gfclk"), - DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"), - DT_CLK(NULL, "func_mcbsp3_gfclk", "func_mcbsp3_gfclk"), - DT_CLK(NULL, "mcbsp4_sync_mux_ck", "mcbsp4_sync_mux_ck"), - DT_CLK(NULL, "per_mcbsp4_gfclk", "per_mcbsp4_gfclk"), - DT_CLK(NULL, "hsmmc1_fclk", "hsmmc1_fclk"), - DT_CLK(NULL, "hsmmc2_fclk", "hsmmc2_fclk"), - DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "ocp2scp_usb_phy_phy_48m"), - DT_CLK(NULL, "sha2md5_fck", "sha2md5_fck"), - DT_CLK(NULL, "slimbus1_fclk_1", 
"slimbus1_fclk_1"), - DT_CLK(NULL, "slimbus1_fclk_0", "slimbus1_fclk_0"), - DT_CLK(NULL, "slimbus1_fclk_2", "slimbus1_fclk_2"), - DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"), - DT_CLK(NULL, "slimbus2_fclk_1", "slimbus2_fclk_1"), - DT_CLK(NULL, "slimbus2_fclk_0", "slimbus2_fclk_0"), - DT_CLK(NULL, "slimbus2_slimbus_clk", "slimbus2_slimbus_clk"), - DT_CLK(NULL, "smartreflex_core_fck", "smartreflex_core_fck"), - DT_CLK(NULL, "smartreflex_iva_fck", "smartreflex_iva_fck"), - DT_CLK(NULL, "smartreflex_mpu_fck", "smartreflex_mpu_fck"), - DT_CLK(NULL, "dmt1_clk_mux", "dmt1_clk_mux"), - DT_CLK(NULL, "cm2_dm10_mux", "cm2_dm10_mux"), - DT_CLK(NULL, "cm2_dm11_mux", "cm2_dm11_mux"), - DT_CLK(NULL, "cm2_dm2_mux", "cm2_dm2_mux"), - DT_CLK(NULL, "cm2_dm3_mux", "cm2_dm3_mux"), - DT_CLK(NULL, "cm2_dm4_mux", "cm2_dm4_mux"), - DT_CLK(NULL, "timer5_sync_mux", "timer5_sync_mux"), - DT_CLK(NULL, "timer6_sync_mux", "timer6_sync_mux"), - DT_CLK(NULL, "timer7_sync_mux", "timer7_sync_mux"), - DT_CLK(NULL, "timer8_sync_mux", "timer8_sync_mux"), - DT_CLK(NULL, "cm2_dm9_mux", "cm2_dm9_mux"), - DT_CLK(NULL, "usb_host_fs_fck", "usb_host_fs_fck"), DT_CLK("usbhs_omap", "fs_fck", "usb_host_fs_fck"), - DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"), - DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"), - DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"), - DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"), - DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"), - DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"), - DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"), - DT_CLK(NULL, "usb_host_hs_func48mclk", "usb_host_hs_func48mclk"), - DT_CLK(NULL, "usb_host_hs_fck", "usb_host_hs_fck"), DT_CLK("usbhs_omap", "hs_fck", "usb_host_hs_fck"), - DT_CLK(NULL, "otg_60m_gfclk", "otg_60m_gfclk"), - DT_CLK(NULL, "usb_otg_hs_xclk", "usb_otg_hs_xclk"), - DT_CLK(NULL, "usb_otg_hs_ick", "usb_otg_hs_ick"), DT_CLK("musb-omap2430", "ick", "usb_otg_hs_ick"), - DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"), - DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"), - DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"), - DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"), - DT_CLK(NULL, "usb_tll_hs_ick", "usb_tll_hs_ick"), DT_CLK("usbhs_omap", "usbtll_ick", "usb_tll_hs_ick"), DT_CLK("usbhs_tll", "usbtll_ick", "usb_tll_hs_ick"), - DT_CLK(NULL, "usim_ck", "usim_ck"), - DT_CLK(NULL, "usim_fclk", "usim_fclk"), - DT_CLK(NULL, "pmd_stm_clock_mux_ck", "pmd_stm_clock_mux_ck"), - DT_CLK(NULL, "pmd_trace_clk_mux_ck", "pmd_trace_clk_mux_ck"), - DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"), - DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"), - DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"), - DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"), - DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"), - DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"), - DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"), - DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"), - DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"), - DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"), - DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"), - DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"), - DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"), - DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"), - DT_CLK(NULL, "auxclk4_src_ck", 
"auxclk4_src_ck"), - DT_CLK(NULL, "auxclk4_ck", "auxclk4_ck"), - DT_CLK(NULL, "auxclkreq4_ck", "auxclkreq4_ck"), - DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"), - DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"), - DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"), DT_CLK("omap_i2c.1", "ick", "dummy_ck"), DT_CLK("omap_i2c.2", "ick", "dummy_ck"), DT_CLK("omap_i2c.3", "ick", "dummy_ck"), @@ -263,9 +80,6 @@ static struct ti_dt_clk omap44xx_clks[] = { DT_CLK("4013c000.timer", "timer_sys_ck", "syc_clk_div_ck"), DT_CLK("4013e000.timer", "timer_sys_ck", "syc_clk_div_ck"), DT_CLK(NULL, "cpufreq_ck", "dpll_mpu_ck"), - DT_CLK(NULL, "bandgap_fclk", "bandgap_fclk"), - DT_CLK(NULL, "div_ts_ck", "div_ts_ck"), - DT_CLK(NULL, "bandgap_ts_fclk", "bandgap_ts_fclk"), { .node_name = NULL }, }; @@ -278,6 +92,8 @@ int __init omap4xxx_dt_clk_init(void) omap2_clk_disable_autoidle_all(); + ti_clk_add_aliases(); + /* * Lock USB DPLL on OMAP4 devices so that the L3INIT power * domain can transition to retention state when not in use. diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 45d05339d583..13eb04f72389 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -24,6 +24,9 @@ #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/clk/ti.h> + +#include "clock.h" #define DRA7_ATL_INSTANCES 4 @@ -171,6 +174,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node) struct clk_init_data init = { NULL }; const char **parent_names = NULL; struct clk *clk; + int ret; clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); if (!clk_hw) { @@ -200,9 +204,14 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node) init.parent_names = parent_names; - clk = clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, node->name); if (!IS_ERR(clk)) { + ret = ti_clk_add_alias(NULL, clk, node->name); + if (ret) { + clk_unregister(clk); + goto cleanup; + } of_clk_add_provider(node, of_clk_src_simple_get, clk); kfree(parent_names); return; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 5fcf247759ac..e5a1c8297a1d 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -24,6 +24,7 @@ #include <linux/list.h> #include <linux/regmap.h> #include <linux/bootmem.h> +#include <linux/device.h> #include "clock.h" @@ -42,27 +43,29 @@ struct clk_iomap { static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS]; -static void clk_memmap_writel(u32 val, void __iomem *reg) +static void clk_memmap_writel(u32 val, const struct clk_omap_reg *reg) { - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; + struct clk_iomap *io = clk_memmaps[reg->index]; - if (io->regmap) - regmap_write(io->regmap, r->offset, val); + if (reg->ptr) + writel_relaxed(val, reg->ptr); + else if (io->regmap) + regmap_write(io->regmap, reg->offset, val); else - writel_relaxed(val, io->mem + r->offset); + writel_relaxed(val, io->mem + reg->offset); } -static u32 clk_memmap_readl(void __iomem *reg) +static u32 clk_memmap_readl(const struct clk_omap_reg *reg) { u32 val; - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; + struct clk_iomap *io = clk_memmaps[reg->index]; - if (io->regmap) - regmap_read(io->regmap, r->offset, &val); + if (reg->ptr) + val = readl_relaxed(reg->ptr); + else if (io->regmap) + regmap_read(io->regmap, reg->offset, &val); else - val = readl_relaxed(io->mem + r->offset); + val = readl_relaxed(io->mem + 
reg->offset); return val; } @@ -161,20 +164,18 @@ int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, * ti_clk_get_reg_addr - get register address for a clock register * @node: device node for the clock * @index: register index from the clock node + * @reg: pointer to target register struct * - * Builds clock register address from device tree information. This - * is a struct of type clk_omap_reg. Returns a pointer to the register - * address, or a pointer error value in failure. + * Builds clock register address from device tree information, and returns + * the data via the provided output pointer @reg. Returns 0 on success, + * negative error value on failure. */ -void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) +int ti_clk_get_reg_addr(struct device_node *node, int index, + struct clk_omap_reg *reg) { - struct clk_omap_reg *reg; u32 val; - u32 tmp; int i; - reg = (struct clk_omap_reg *)&tmp; - for (i = 0; i < CLK_MAX_MEMMAPS; i++) { if (clocks_node_ptr[i] == node->parent) break; @@ -182,19 +183,20 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) if (i == CLK_MAX_MEMMAPS) { pr_err("clk-provider not found for %s!\n", node->name); - return IOMEM_ERR_PTR(-ENOENT); + return -ENOENT; } reg->index = i; if (of_property_read_u32_index(node, "reg", index, &val)) { pr_err("%s must have reg[%d]!\n", node->name, index); - return IOMEM_ERR_PTR(-EINVAL); + return -EINVAL; } reg->offset = val; + reg->ptr = NULL; - return (__force void __iomem *)tmp; + return 0; } /** @@ -297,6 +299,7 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) struct ti_clk_fixed *fixed; struct ti_clk_fixed_factor *fixed_factor; struct clk_hw *clk_hw; + int ret; if (setup->clk) return setup->clk; @@ -307,6 +310,13 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) clk = clk_register_fixed_rate(NULL, setup->name, NULL, 0, fixed->frequency); + if (!IS_ERR(clk)) { + ret = ti_clk_add_alias(NULL, clk, setup->name); + if (ret) { + clk_unregister(clk); + clk = ERR_PTR(ret); + } + } break; case TI_CLK_MUX: clk = ti_clk_register_mux(setup); @@ -324,6 +334,13 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) fixed_factor->parent, 0, fixed_factor->mult, fixed_factor->div); + if (!IS_ERR(clk)) { + ret = ti_clk_add_alias(NULL, clk, setup->name); + if (ret) { + clk_unregister(clk); + clk = ERR_PTR(ret); + } + } break; case TI_CLK_GATE: clk = ti_clk_register_gate(setup); @@ -371,9 +388,6 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) clks->clk->name, PTR_ERR(clk)); return PTR_ERR(clk); } - } else { - clks->lk.clk = clk; - clkdev_add(&clks->lk); } clks++; } @@ -396,8 +410,6 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) } } else { retry = true; - retry_clk->lk.clk = clk; - clkdev_add(&retry_clk->lk); list_del(&retry_clk->link); } } @@ -407,6 +419,32 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) } #endif +static const struct of_device_id simple_clk_match_table[] __initconst = { + { .compatible = "fixed-clock" }, + { .compatible = "fixed-factor-clock" }, + { } +}; + +/** + * ti_clk_add_aliases - setup clock aliases + * + * Sets up any missing clock aliases. No return value. 
+ */ +void __init ti_clk_add_aliases(void) +{ + struct device_node *np; + struct clk *clk; + + for_each_matching_node(np, simple_clk_match_table) { + struct of_phandle_args clkspec; + + clkspec.np = np; + clk = of_clk_get_from_provider(&clkspec); + + ti_clk_add_alias(NULL, clk, np->name); + } +} + /** * ti_clk_setup_features - setup clock features flags * @features: features definition to use @@ -453,3 +491,66 @@ void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) clk_prepare_enable(init_clk); } } + +/** + * ti_clk_add_alias - add a clock alias for a TI clock + * @dev: device alias for this clock + * @clk: clock handle to create alias for + * @con: connection ID for this clock + * + * Creates a clock alias for a TI clock. Allocates the clock lookup entry + * and assigns the data to it. Returns 0 if successful, negative error + * value otherwise. + */ +int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con) +{ + struct clk_lookup *cl; + + if (!clk) + return 0; + + if (IS_ERR(clk)) + return PTR_ERR(clk); + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return -ENOMEM; + + if (dev) + cl->dev_id = dev_name(dev); + cl->con_id = con; + cl->clk = clk; + + clkdev_add(cl); + + return 0; +} + +/** + * ti_clk_register - register a TI clock to the common clock framework + * @dev: device for this clock + * @hw: hardware clock handle + * @con: connection ID for this clock + * + * Registers a TI clock to the common clock framework, and adds a clock + * alias for it. Returns a handle to the registered clock if successful, + * ERR_PTR value in failure. + */ +struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, + const char *con) +{ + struct clk *clk; + int ret; + + clk = clk_register(dev, hw); + if (IS_ERR(clk)) + return clk; + + ret = ti_clk_add_alias(dev, clk, con); + if (ret) { + clk_unregister(clk); + return ERR_PTR(ret); + } + + return clk; +} diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index c6ae563801d7..91751dd26b16 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -55,7 +55,8 @@ * elapsed. XXX Deprecated - should be moved into drivers for the * individual IP block that the IDLEST register exists in. 
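The alias created by ti_clk_add_alias() above is an ordinary clkdev lookup keyed by connection ID (and optionally a device name), so consumers resolve it with clk_get(). A hedged consumer-side sketch; "example_fck" is a placeholder connection ID, not a name from this patch:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_enable_aliased_clock(void)
	{
		struct clk *clk;

		/* dev_id is NULL for the node-name aliases added by ti_clk_add_aliases(),
		 * so the clkdev lookup matches on the connection ID alone. */
		clk = clk_get(NULL, "example_fck");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_prepare_enable(clk);
	}
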
*/ -static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, +static int _wait_idlest_generic(struct clk_hw_omap *clk, + struct clk_omap_reg *reg, u32 mask, u8 idlest, const char *name) { int i = 0, ena = 0; @@ -91,7 +92,7 @@ static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, */ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) { - void __iomem *companion_reg, *idlest_reg; + struct clk_omap_reg companion_reg, idlest_reg; u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; s16 prcm_mod; int r; @@ -99,17 +100,17 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) /* Not all modules have multiple clocks that their IDLEST depends on */ if (clk->ops->find_companion) { clk->ops->find_companion(clk, &companion_reg, &other_bit); - if (!(ti_clk_ll_ops->clk_readl(companion_reg) & + if (!(ti_clk_ll_ops->clk_readl(&companion_reg) & (1 << other_bit))) return; } clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); - r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod, + r = ti_clk_ll_ops->cm_split_idlest_reg(&idlest_reg, &prcm_mod, &idlest_reg_id); if (r) { /* IDLEST register not in the CM module */ - _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit), + _wait_idlest_generic(clk, &idlest_reg, (1 << idlest_bit), idlest_val, clk_hw_get_name(&clk->hw)); } else { ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id, @@ -139,17 +140,17 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) * avoid this issue, and remove the casts. No return value. */ void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, u8 *other_bit) + struct clk_omap_reg *other_reg, + u8 *other_bit) { - u32 r; + memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg)); /* * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes * it's just a matter of XORing the bits. */ - r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)); + other_reg->offset ^= (CM_FCLKEN ^ CM_ICLKEN); - *other_reg = (__force void __iomem *)r; *other_bit = clk->enable_bit; } @@ -168,13 +169,14 @@ void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, * CM_IDLEST2). This is not true for all modules. No return value. 
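The find_companion/find_idlest rework above derives the related register purely from the clk_omap_reg offset instead of casting __iomem pointers. A compact sketch of the companion derivation, assuming the usual OMAP CM_FCLKEN/CM_ICLKEN macros are in scope (struct assignment here is equivalent to the memcpy() used in the patch):

	static void example_find_companion(struct clk_hw_omap *clk,
					   struct clk_omap_reg *other_reg, u8 *other_bit)
	{
		*other_reg = clk->enable_reg;			/* same provider index */
		other_reg->offset ^= (CM_FCLKEN ^ CM_ICLKEN);	/* FCLKEN <-> ICLKEN */
		*other_bit = clk->enable_bit;
	}
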
*/ void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, u8 *idlest_bit, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; *idlest_bit = clk->enable_bit; /* @@ -222,31 +224,19 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (IS_ERR(clk->enable_reg)) { - pr_err("%s: %s missing enable_reg\n", __func__, - clk_hw_get_name(hw)); - ret = -EINVAL; - goto err; - } - /* FIXME should not have INVERT_ENABLE bit here */ - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v &= ~(1 << clk->enable_bit); else v |= (1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, clk->enable_reg); - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */ + ti_clk_ll_ops->clk_writel(v, &clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); /* OCP barrier */ if (clk->ops && clk->ops->find_idlest) _omap2_module_wait_ready(clk); return 0; - -err: - if (clkdm_control && clk->clkdm) - ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); - return ret; } /** @@ -264,22 +254,13 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (IS_ERR(clk->enable_reg)) { - /* - * 'independent' here refers to a clock which is not - * controlled by its parent. - */ - pr_err("%s: independent clock %s has no enable_reg\n", - __func__, clk_hw_get_name(hw)); - return; - } - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v |= (1 << clk->enable_bit); else v &= ~(1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, clk->enable_reg); + ti_clk_ll_ops->clk_writel(v, &clk->enable_reg); /* No OCP barrier needed here since it is a disable operation */ if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) && @@ -300,7 +281,7 @@ int omap2_dflt_clk_is_enabled(struct clk_hw *hw) struct clk_hw_omap *clk = to_clk_hw_omap(hw); u32 v; - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v ^= BIT(clk->enable_bit); diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c index b919fdfe8256..ce98da2c10be 100644 --- a/drivers/clk/ti/clkt_dpll.c +++ b/drivers/clk/ti/clkt_dpll.c @@ -213,7 +213,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw) if (!dd) return -EINVAL; - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= dd->enable_mask; v >>= __ffs(dd->enable_mask); @@ -249,14 +249,14 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) return 0; /* Return bypass rate if DPLL is bypassed */ - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= dd->enable_mask; v >>= __ffs(dd->enable_mask); if (_omap2_dpll_is_in_bypass(v)) return clk_hw_get_rate(dd->clk_bypass); - v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); dpll_mult = v & dd->mult_mask; dpll_mult >>= __ffs(dd->mult_mask); dpll_div = v & dd->div1_mask; diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c index 38c36908cf88..60b583d7db33 100644 --- a/drivers/clk/ti/clkt_iclk.c +++ b/drivers/clk/ti/clkt_iclk.c @@ -31,28 +31,29 @@ void 
omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk) { u32 v; - void __iomem *r; + struct clk_omap_reg r; - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + memcpy(&r, &clk->enable_reg, sizeof(r)); + r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN); - v = ti_clk_ll_ops->clk_readl(r); + v = ti_clk_ll_ops->clk_readl(&r); v |= (1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, r); + ti_clk_ll_ops->clk_writel(v, &r); } /* XXX */ void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) { u32 v; - void __iomem *r; + struct clk_omap_reg r; - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + memcpy(&r, &clk->enable_reg, sizeof(r)); - v = ti_clk_ll_ops->clk_readl(r); + r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN); + + v = ti_clk_ll_ops->clk_readl(&r); v &= ~(1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, r); + ti_clk_ll_ops->clk_writel(v, &r); } /** @@ -68,14 +69,12 @@ void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) * modules. No return value. */ static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; - - r = ((__force u32)clk->enable_reg ^ (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST)); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset ^= (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST); *idlest_bit = clk->enable_bit; *idlest_val = OMAP24XX_CM_IDLEST_VAL; } diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 13c37f48d9d6..3f7b26540be8 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -16,6 +16,28 @@ #ifndef __DRIVERS_CLK_TI_CLOCK__ #define __DRIVERS_CLK_TI_CLOCK__ +struct clk_omap_divider { + struct clk_hw hw; + struct clk_omap_reg reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; +}; + +#define to_clk_omap_divider(_hw) container_of(_hw, struct clk_omap_divider, hw) + +struct clk_omap_mux { + struct clk_hw hw; + struct clk_omap_reg reg; + u32 *table; + u32 mask; + u8 shift; + u8 flags; +}; + +#define to_clk_omap_mux(_hw) container_of(_hw, struct clk_omap_mux, hw) + enum { TI_CLK_FIXED, TI_CLK_MUX, @@ -86,7 +108,7 @@ struct ti_clk_mux { int num_parents; u16 reg; u8 module; - const char **parents; + const char * const *parents; u16 flags; }; @@ -189,16 +211,25 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup); struct clk *ti_clk_register_divider(struct ti_clk *setup); struct clk *ti_clk_register_composite(struct ti_clk *setup); struct clk *ti_clk_register_dpll(struct ti_clk *setup); +struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, + const char *con); +int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con); +void ti_clk_add_aliases(void); struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup); struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup); struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup); +int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, + u8 flags, u8 *width, + const struct clk_div_table **table); + void ti_clk_patch_legacy_clks(struct ti_clk **patch); struct clk *ti_clk_register_clk(struct ti_clk *setup); int ti_clk_register_legacy_clks(struct ti_clk_alias *clks); -void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); +int ti_clk_get_reg_addr(struct device_node *node, int index, + struct clk_omap_reg *reg); void ti_dt_clocks_register(struct 
ti_dt_clk *oclks); int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, ti_of_clk_init_cb_t func); @@ -223,7 +254,9 @@ extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait; extern const struct clk_ops ti_clk_divider_ops; extern const struct clk_ops ti_clk_mux_ops; +extern const struct clk_ops omap_gate_clk_ops; +void omap2_init_clk_clkdm(struct clk_hw *hw); int omap2_clkops_enable_clkdm(struct clk_hw *hw); void omap2_clkops_disable_clkdm(struct clk_hw *hw); @@ -231,10 +264,10 @@ int omap2_dflt_clk_enable(struct clk_hw *hw); void omap2_dflt_clk_disable(struct clk_hw *hw); int omap2_dflt_clk_is_enabled(struct clk_hw *hw); void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, + struct clk_omap_reg *other_reg, u8 *other_bit); void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val); void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk); diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 6cf9dd189a92..fbedc6a9fed0 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -52,10 +52,6 @@ int omap2_clkops_enable_clkdm(struct clk_hw *hw) return -EINVAL; } - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, - clk_hw_get_name(hw)); - if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", __func__, clk_hw_get_name(hw)); @@ -90,10 +86,6 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw) return; } - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, - clk_hw_get_name(hw)); - if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", __func__, clk_hw_get_name(hw)); @@ -103,6 +95,36 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw) ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); } +/** + * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk + * @clk: OMAP clock struct ptr to use + * + * Convert a clockdomain name stored in a struct clk 'clk' into a + * clockdomain pointer, and save it into the struct clk. Intended to be + * called during clk_register(). No return value. 
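omap2_init_clk_clkdm(), documented above and now exposed via clock.h, resolves a clockdomain by name when the clock is initialised. A hypothetical gate clock opting in is sketched below; the register offset and domain name are placeholders, and the hw would be registered with omap_gate_clk_ops (whose .init hook is omap2_init_clk_clkdm, see the gate.c hunk later in this diff) so the looked-up clockdomain gets cached in .clkdm:

	static struct clk_hw_omap example_gate_hw = {
		.enable_reg	= { .index = 0, .offset = 0x0a00 },	/* placeholder CM register */
		.enable_bit	= 1,
		.clkdm_name	= "example_clkdm",			/* placeholder domain name */
	};
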
+ */ +void omap2_init_clk_clkdm(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct clockdomain *clkdm; + const char *clk_name; + + if (!clk->clkdm_name) + return; + + clk_name = __clk_get_name(hw->clk); + + clkdm = ti_clk_ll_ops->clkdm_lookup(clk->clkdm_name); + if (clkdm) { + pr_debug("clock: associated clk %s to clkdm %s\n", + clk_name, clk->clkdm_name); + clk->clkdm = clkdm; + } else { + pr_debug("clock: could not associate clk %s to clkdm %s\n", + clk_name, clk->clkdm_name); + } +} + static void __init of_ti_clockdomain_setup(struct device_node *node) { struct clk *clk; diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 1cf70f452e1e..beea89463ca2 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -124,8 +124,9 @@ struct clk *ti_clk_register_composite(struct ti_clk *setup) struct clk_hw *mux; struct clk_hw *div; int num_parents = 1; - const char **parent_names = NULL; + const char * const *parent_names = NULL; struct clk *clk; + int ret; comp = setup->data; @@ -150,6 +151,12 @@ struct clk *ti_clk_register_composite(struct ti_clk *setup) &ti_composite_divider_ops, gate, &ti_composite_gate_ops, 0); + ret = ti_clk_add_alias(NULL, clk, setup->name); + if (ret) { + clk_unregister(clk); + return ERR_PTR(ret); + } + return clk; } #endif @@ -163,6 +170,7 @@ static void __init _register_composite(struct clk_hw *hw, int num_parents = 0; const char **parent_names = NULL; int i; + int ret; /* Check for presence of each component clock */ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) { @@ -217,8 +225,14 @@ static void __init _register_composite(struct clk_hw *hw, _get_hw(cclk, CLK_COMPONENT_TYPE_GATE), &ti_composite_gate_ops, 0); - if (!IS_ERR(clk)) + if (!IS_ERR(clk)) { + ret = ti_clk_add_alias(NULL, clk, node->name); + if (ret) { + clk_unregister(clk); + goto cleanup; + } of_clk_add_provider(node, of_clk_src_simple_get, clk); + } cleanup: /* Free component clock list entries */ diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 6bb87784a0d6..88f04a4cb890 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -39,7 +39,7 @@ static unsigned int _get_table_maxdiv(const struct clk_div_table *table) return maxdiv; } -static unsigned int _get_maxdiv(struct clk_divider *divider) +static unsigned int _get_maxdiv(struct clk_omap_divider *divider) { if (divider->flags & CLK_DIVIDER_ONE_BASED) return div_mask(divider); @@ -61,7 +61,7 @@ static unsigned int _get_table_div(const struct clk_div_table *table, return 0; } -static unsigned int _get_div(struct clk_divider *divider, unsigned int val) +static unsigned int _get_div(struct clk_omap_divider *divider, unsigned int val) { if (divider->flags & CLK_DIVIDER_ONE_BASED) return val; @@ -83,7 +83,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table, return 0; } -static unsigned int _get_val(struct clk_divider *divider, u8 div) +static unsigned int _get_val(struct clk_omap_divider *divider, u8 div) { if (divider->flags & CLK_DIVIDER_ONE_BASED) return div; @@ -97,10 +97,10 @@ static unsigned int _get_val(struct clk_divider *divider, u8 div) static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { - struct clk_divider *divider = to_clk_divider(hw); + struct clk_omap_divider *divider = to_clk_omap_divider(hw); unsigned int div, val; - val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift; + val = ti_clk_ll_ops->clk_readl(÷r->reg) >> divider->shift; val &= div_mask(divider); div = 
_get_div(divider, val); @@ -131,7 +131,7 @@ static bool _is_valid_table_div(const struct clk_div_table *table, return false; } -static bool _is_valid_div(struct clk_divider *divider, unsigned int div) +static bool _is_valid_div(struct clk_omap_divider *divider, unsigned int div) { if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) return is_power_of_2(div); @@ -172,7 +172,7 @@ static int _div_round(const struct clk_div_table *table, static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { - struct clk_divider *divider = to_clk_divider(hw); + struct clk_omap_divider *divider = to_clk_omap_divider(hw); int i, bestdiv = 0; unsigned long parent_rate, best = 0, now, maxdiv; unsigned long parent_rate_saved = *best_parent_rate; @@ -239,14 +239,14 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { - struct clk_divider *divider; + struct clk_omap_divider *divider; unsigned int div, value; u32 val; if (!hw || !rate) return -EINVAL; - divider = to_clk_divider(hw); + divider = to_clk_omap_divider(hw); div = DIV_ROUND_UP(parent_rate, rate); value = _get_val(divider, div); @@ -257,11 +257,11 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { val = div_mask(divider) << (divider->shift + 16); } else { - val = ti_clk_ll_ops->clk_readl(divider->reg); + val = ti_clk_ll_ops->clk_readl(÷r->reg); val &= ~(div_mask(divider) << divider->shift); } val |= value << divider->shift; - ti_clk_ll_ops->clk_writel(val, divider->reg); + ti_clk_ll_ops->clk_writel(val, ÷r->reg); return 0; } @@ -274,11 +274,12 @@ const struct clk_ops ti_clk_divider_ops = { static struct clk *_register_divider(struct device *dev, const char *name, const char *parent_name, - unsigned long flags, void __iomem *reg, + unsigned long flags, + struct clk_omap_reg *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table) { - struct clk_divider *div; + struct clk_omap_divider *div; struct clk *clk; struct clk_init_data init; @@ -303,7 +304,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, init.num_parents = (parent_name ? 
1 : 0); /* struct clk_divider assignments */ - div->reg = reg; + memcpy(&div->reg, reg, sizeof(*reg)); div->shift = shift; div->width = width; div->flags = clk_divider_flags; @@ -311,7 +312,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, div->table = table; /* register the clock */ - clk = clk_register(dev, &div->hw); + clk = ti_clk_register(dev, &div->hw, name); if (IS_ERR(clk)) kfree(div); @@ -319,20 +320,17 @@ static struct clk *_register_divider(struct device *dev, const char *name, return clk; } -static struct clk_div_table * -_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) +int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, + u8 flags, u8 *width, + const struct clk_div_table **table) { int valid_div = 0; - struct clk_div_table *table; - int i; - int div; u32 val; - u8 flags; - - if (!setup->num_dividers) { - /* Clk divider table not provided, determine min/max divs */ - flags = setup->flags; + int div; + int i; + struct clk_div_table *tmp; + if (!div_table) { if (flags & CLKF_INDEX_STARTS_AT_ONE) val = 1; else @@ -340,7 +338,7 @@ _get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) div = 1; - while (div < setup->max_div) { + while (div < max_div) { if (flags & CLKF_INDEX_POWER_OF_TWO) div <<= 1; else @@ -349,37 +347,59 @@ _get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) } *width = fls(val); + *table = NULL; - return NULL; + return 0; } - for (i = 0; i < setup->num_dividers; i++) - if (setup->dividers[i]) + i = 0; + + while (!num_dividers || i < num_dividers) { + if (div_table[i] == -1) + break; + if (div_table[i]) valid_div++; + i++; + } - table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL); - if (!table) - return ERR_PTR(-ENOMEM); + num_dividers = i; + + tmp = kzalloc(sizeof(*tmp) * (valid_div + 1), GFP_KERNEL); + if (!tmp) + return -ENOMEM; valid_div = 0; *width = 0; - for (i = 0; i < setup->num_dividers; i++) - if (setup->dividers[i]) { - table[valid_div].div = setup->dividers[i]; - table[valid_div].val = i; + for (i = 0; i < num_dividers; i++) + if (div_table[i] > 0) { + tmp[valid_div].div = div_table[i]; + tmp[valid_div].val = i; valid_div++; *width = i; } *width = fls(*width); + *table = tmp; + + return 0; +} + +static const struct clk_div_table * +_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) +{ + const struct clk_div_table *table = NULL; + + ti_clk_parse_divider_data(setup->dividers, setup->num_dividers, + setup->max_div, setup->flags, width, + &table); return table; } struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) { - struct clk_divider *div; + struct clk_omap_divider *div; struct clk_omap_reg *reg; if (!setup) @@ -408,22 +428,17 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) struct clk *ti_clk_register_divider(struct ti_clk *setup) { - struct ti_clk_divider *div; - struct clk_omap_reg *reg_setup; - u32 reg; + struct ti_clk_divider *div = setup->data; + struct clk_omap_reg reg = { + .index = div->module, + .offset = div->reg, + }; u8 width; u32 flags = 0; u8 div_flags = 0; - struct clk_div_table *table; + const struct clk_div_table *table; struct clk *clk; - div = setup->data; - - reg_setup = (struct clk_omap_reg *)® - - reg_setup->index = div->module; - reg_setup->offset = div->reg; - if (div->flags & CLKF_INDEX_STARTS_AT_ONE) div_flags |= CLK_DIVIDER_ONE_BASED; @@ -438,7 +453,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup) return (struct clk *)table; clk = 
_register_divider(NULL, setup->name, div->parent, - flags, (void __iomem *)reg, div->bit_shift, + flags, ®, div->bit_shift, width, div_flags, table); if (IS_ERR(clk)) @@ -542,14 +557,15 @@ static int _get_divider_width(struct device_node *node, } static int __init ti_clk_divider_populate(struct device_node *node, - void __iomem **reg, const struct clk_div_table **table, + struct clk_omap_reg *reg, const struct clk_div_table **table, u32 *flags, u8 *div_flags, u8 *width, u8 *shift) { u32 val; + int ret; - *reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(*reg)) - return PTR_ERR(*reg); + ret = ti_clk_get_reg_addr(node, 0, reg); + if (ret) + return ret; if (!of_property_read_u32(node, "ti,bit-shift", &val)) *shift = val; @@ -588,7 +604,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node) { struct clk *clk; const char *parent_name; - void __iomem *reg; + struct clk_omap_reg reg; u8 clk_divider_flags = 0; u8 width = 0; u8 shift = 0; @@ -601,7 +617,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node) &clk_divider_flags, &width, &shift)) goto cleanup; - clk = _register_divider(NULL, node->name, parent_name, flags, reg, + clk = _register_divider(NULL, node->name, parent_name, flags, ®, shift, width, clk_divider_flags, table); if (!IS_ERR(clk)) { @@ -617,7 +633,7 @@ CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup); static void __init of_ti_composite_divider_clk_setup(struct device_node *node) { - struct clk_divider *div; + struct clk_omap_divider *div; u32 val; div = kzalloc(sizeof(*div), GFP_KERNEL); diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 4b9a419d8e14..d4e4444bc5ca 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -185,7 +185,7 @@ static void __init _register_dpll(struct clk_hw *hw, dd->clk_bypass = __clk_get_hw(clk); /* register the clock */ - clk = clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, node->name); if (!IS_ERR(clk)) { omap2_init_clk_hw_omap_clocks(&clk_hw->hw); @@ -203,17 +203,10 @@ cleanup: } #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) -static void __iomem *_get_reg(u8 module, u16 offset) +void _get_reg(u8 module, u16 offset, struct clk_omap_reg *reg) { - u32 reg; - struct clk_omap_reg *reg_setup; - - reg_setup = (struct clk_omap_reg *)® - - reg_setup->index = module; - reg_setup->offset = offset; - - return (void __iomem *)reg; + reg->index = module; + reg->offset = offset; } struct clk *ti_clk_register_dpll(struct ti_clk *setup) @@ -248,7 +241,6 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup) clk_hw->dpll_data = dd; clk_hw->ops = &clkhwops_omap3_dpll; clk_hw->hw.init = &init; - clk_hw->flags = MEMMAP_ADDRESSING; init.name = setup->name; init.ops = ops; @@ -256,10 +248,10 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup) init.num_parents = dpll->num_parents; init.parent_names = dpll->parents; - dd->control_reg = _get_reg(dpll->module, dpll->control_reg); - dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg); - dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg); - dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg); + _get_reg(dpll->module, dpll->control_reg, &dd->control_reg); + _get_reg(dpll->module, dpll->idlest_reg, &dd->idlest_reg); + _get_reg(dpll->module, dpll->mult_div1_reg, &dd->mult_div1_reg); + _get_reg(dpll->module, dpll->autoidle_reg, &dd->autoidle_reg); dd->modes = dpll->modes; dd->div1_mask = dpll->div1_mask; @@ -288,7 +280,7 @@ struct clk *ti_clk_register_dpll(struct ti_clk 
*setup) if (dpll->flags & CLKF_J_TYPE) dd->flags |= DPLL_J_TYPE; - clk = clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, setup->name); if (!IS_ERR(clk)) return clk; @@ -339,8 +331,24 @@ static void _register_dpll_x2(struct device_node *node, init.parent_names = &parent_name; init.num_parents = 1; +#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ + defined(CONFIG_SOC_DRA7XX) + if (hw_ops == &clkhwops_omap4_dpllmx) { + int ret; + + /* Check if register defined, if not, drop hw-ops */ + ret = of_property_count_elems_of_size(node, "reg", 1); + if (ret <= 0) { + clk_hw->ops = NULL; + } else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) { + kfree(clk_hw); + return; + } + } +#endif + /* register the clock */ - clk = clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, name); if (IS_ERR(clk)) { kfree(clk_hw); @@ -380,7 +388,6 @@ static void __init of_ti_dpll_setup(struct device_node *node, clk_hw->dpll_data = dd; clk_hw->ops = &clkhwops_omap3_dpll; clk_hw->hw.init = init; - clk_hw->flags = MEMMAP_ADDRESSING; init->name = node->name; init->ops = ops; @@ -399,7 +406,8 @@ static void __init of_ti_dpll_setup(struct device_node *node, init->parent_names = parent_names; - dd->control_reg = ti_clk_get_reg_addr(node, 0); + if (ti_clk_get_reg_addr(node, 0, &dd->control_reg)) + goto cleanup; /* * Special case for OMAP2 DPLL, register order is different due to @@ -407,25 +415,22 @@ static void __init of_ti_dpll_setup(struct device_node *node, * missing idlest_mask. */ if (!dd->idlest_mask) { - dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1); + if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg)) + goto cleanup; #ifdef CONFIG_ARCH_OMAP2 clk_hw->ops = &clkhwops_omap2xxx_dpll; omap2xxx_clkt_dpllcore_init(&clk_hw->hw); #endif } else { - dd->idlest_reg = ti_clk_get_reg_addr(node, 1); - if (IS_ERR(dd->idlest_reg)) + if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg)) goto cleanup; - dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2); + if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg)) + goto cleanup; } - if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg)) - goto cleanup; - if (dd->autoidle_mask) { - dd->autoidle_reg = ti_clk_get_reg_addr(node, 3); - if (IS_ERR(dd->autoidle_reg)) + if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg)) goto cleanup; } diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 4cdd28a25584..4534de2ef455 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c @@ -54,10 +54,10 @@ static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) dd = clk->dpll_data; - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= ~dd->enable_mask; v |= clken_bits << __ffs(dd->enable_mask); - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ @@ -73,7 +73,7 @@ static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) state <<= __ffs(dd->idlest_mask); - while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) + while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) != state) && i < MAX_DPLL_WAIT_TRIES) { i++; udelay(1); @@ -151,7 +151,7 @@ static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) state <<= __ffs(dd->idlest_mask); /* Check if already locked */ - if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) == + if 
((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) == state) goto done; @@ -317,14 +317,14 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) * only since freqsel field is no longer present on other devices. */ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= ~dd->freqsel_mask; v |= freqsel << __ffs(dd->freqsel_mask); - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* Set DPLL multiplier, divider */ - v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); /* Handle Duty Cycle Correction */ if (dd->dcc_mask) { @@ -370,11 +370,11 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) } } - ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg); + ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg); /* Set 4X multiplier and low-power mode */ if (dd->m4xen_mask || dd->lpmode_mask) { - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); if (dd->m4xen_mask) { if (dd->last_rounded_m4xen) @@ -390,7 +390,7 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) v &= ~dd->lpmode_mask; } - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* We let the clock framework set the other output dividers later */ @@ -652,10 +652,10 @@ static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return -EINVAL; - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= dd->autoidle_mask; v >>= __ffs(dd->autoidle_mask); @@ -681,7 +681,7 @@ static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return; /* @@ -689,10 +689,10 @@ static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) * by writing 0x5 instead of 0x1. Add some mechanism to * optionally enter this mode. 
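The autoidle helpers above now key off dd->autoidle_mask rather than a register pointer, and every access goes through the low-level ops. The shared read-modify-write pattern, sketched with the target mode passed in as a parameter; the two in-tree helpers use DPLL_AUTOIDLE_LOW_POWER_STOP and DPLL_AUTOIDLE_DISABLE respectively:

	static void example_dpll_set_autoidle(struct dpll_data *dd, u32 mode)
	{
		u32 v;

		if (!dd->autoidle_mask)		/* this DPLL has no autoidle control */
			return;

		v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
		v &= ~dd->autoidle_mask;
		v |= mode << __ffs(dd->autoidle_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
	}
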
*/ - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); + ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); } /** @@ -711,13 +711,13 @@ static void omap3_dpll_deny_idle(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return; - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); + ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); } /* Clock control for DPLL outputs */ @@ -773,7 +773,7 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, WARN_ON(!dd->enable_mask); - v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask; + v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask; v >>= __ffs(dd->enable_mask); if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) rate = parent_rate; diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c index 82c05b55a7be..d7a3f7ec8d77 100644 --- a/drivers/clk/ti/dpll44xx.c +++ b/drivers/clk/ti/dpll44xx.c @@ -42,17 +42,17 @@ static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk) u32 v; u32 mask; - if (!clk || !clk->clksel_reg) + if (!clk) return; mask = clk->flags & CLOCK_CLKOUTX2 ? OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg); /* Clear the bit to allow gatectrl */ v &= ~mask; - ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); + ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg); } static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) @@ -60,17 +60,17 @@ static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) u32 v; u32 mask; - if (!clk || !clk->clksel_reg) + if (!clk) return; mask = clk->flags & CLOCK_CLKOUTX2 ? 
OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg); /* Set the bit to deny gatectrl */ v |= mask; - ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); + ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg); } const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = { @@ -128,7 +128,7 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, rate = omap2_get_dpll_rate(clk); /* regm4xen adds a multiplier of 4 to DPLL calculations */ - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); if (v & OMAP4430_DPLL_REGM4XEN_MASK) rate *= OMAP4430_REGM4XEN_MULT; diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c index 3cd406768909..0174a51a4ba6 100644 --- a/drivers/clk/ti/fixed-factor.c +++ b/drivers/clk/ti/fixed-factor.c @@ -62,6 +62,7 @@ static void __init of_ti_fixed_factor_clk_setup(struct device_node *node) if (!IS_ERR(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); of_ti_clk_autoidle_setup(node); + ti_clk_add_alias(NULL, clk, clk_name); } } CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock", diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index bc05f276f32b..7151ec3a1b07 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -35,7 +35,7 @@ static const struct clk_ops omap_gate_clkdm_clk_ops = { .disable = &omap2_clkops_disable_clkdm, }; -static const struct clk_ops omap_gate_clk_ops = { +const struct clk_ops omap_gate_clk_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, @@ -62,7 +62,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = { */ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) { - struct clk_divider *parent; + struct clk_omap_divider *parent; struct clk_hw *parent_hw; u32 dummy_v, orig_v; int ret; @@ -72,19 +72,19 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) /* Parent is the x2 node, get parent of parent for the m2 div */ parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw)); - parent = to_clk_divider(parent_hw); + parent = to_clk_omap_divider(parent_hw); /* Restore the dividers */ if (!ret) { - orig_v = ti_clk_ll_ops->clk_readl(parent->reg); + orig_v = ti_clk_ll_ops->clk_readl(&parent->reg); dummy_v = orig_v; /* Write any other value different from the Read value */ dummy_v ^= (1 << parent->shift); - ti_clk_ll_ops->clk_writel(dummy_v, parent->reg); + ti_clk_ll_ops->clk_writel(dummy_v, &parent->reg); /* Write the original divider */ - ti_clk_ll_ops->clk_writel(orig_v, parent->reg); + ti_clk_ll_ops->clk_writel(orig_v, &parent->reg); } return ret; @@ -92,7 +92,7 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) static struct clk *_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, - void __iomem *reg, u8 bit_idx, + struct clk_omap_reg *reg, u8 bit_idx, u8 clk_gate_flags, const struct clk_ops *ops, const struct clk_hw_omap_ops *hw_ops) { @@ -109,18 +109,18 @@ static struct clk *_register_gate(struct device *dev, const char *name, init.name = name; init.ops = ops; - clk_hw->enable_reg = reg; + memcpy(&clk_hw->enable_reg, reg, sizeof(*reg)); clk_hw->enable_bit = bit_idx; clk_hw->ops = hw_ops; - clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags; + clk_hw->flags = clk_gate_flags; init.parent_names = &parent_name; init.num_parents = 1; init.flags = flags; - clk = 
clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, name); if (IS_ERR(clk)) kfree(clk_hw); @@ -133,8 +133,7 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) { const struct clk_ops *ops = &omap_gate_clk_ops; const struct clk_hw_omap_ops *hw_ops = NULL; - u32 reg; - struct clk_omap_reg *reg_setup; + struct clk_omap_reg reg; u32 flags = 0; u8 clk_gate_flags = 0; struct ti_clk_gate *gate; @@ -144,8 +143,6 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) if (gate->flags & CLKF_INTERFACE) return ti_clk_register_interface(setup); - reg_setup = (struct clk_omap_reg *)® - if (gate->flags & CLKF_SET_RATE_PARENT) flags |= CLK_SET_RATE_PARENT; @@ -169,11 +166,12 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) if (gate->flags & CLKF_AM35XX) hw_ops = &clkhwops_am35xx_ipss_module_wait; - reg_setup->index = gate->module; - reg_setup->offset = gate->reg; + reg.index = gate->module; + reg.offset = gate->reg; + reg.ptr = NULL; return _register_gate(NULL, setup->name, gate->parent, flags, - (void __iomem *)reg, gate->bit_shift, + ®, gate->bit_shift, clk_gate_flags, ops, hw_ops); } @@ -203,7 +201,6 @@ struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup) ops = &clkhwops_iclk_wait; gate->ops = ops; - gate->flags = MEMMAP_ADDRESSING; return &gate->hw; } @@ -215,15 +212,14 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node, { struct clk *clk; const char *parent_name; - void __iomem *reg = NULL; + struct clk_omap_reg reg; u8 enable_bit = 0; u32 val; u32 flags = 0; u8 clk_gate_flags = 0; if (ops != &omap_gate_clkdm_clk_ops) { - reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) return; if (!of_property_read_u32(node, "ti,bit-shift", &val)) @@ -243,7 +239,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node, if (of_property_read_bool(node, "ti,set-bit-to-disable")) clk_gate_flags |= INVERT_ENABLE; - clk = _register_gate(NULL, node->name, parent_name, flags, reg, + clk = _register_gate(NULL, node->name, parent_name, flags, ®, enable_bit, clk_gate_flags, ops, hw_ops); if (!IS_ERR(clk)) @@ -261,15 +257,13 @@ _of_ti_composite_gate_clk_setup(struct device_node *node, if (!gate) return; - gate->enable_reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(gate->enable_reg)) + if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg)) goto cleanup; of_property_read_u32(node, "ti,bit-shift", &val); gate->enable_bit = val; gate->ops = hw_ops; - gate->flags = MEMMAP_ADDRESSING; if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE)) return; diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index e505e6f8228d..62cf50c1e1e3 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -34,7 +34,7 @@ static const struct clk_ops ti_interface_clk_ops = { static struct clk *_register_interface(struct device *dev, const char *name, const char *parent_name, - void __iomem *reg, u8 bit_idx, + struct clk_omap_reg *reg, u8 bit_idx, const struct clk_hw_omap_ops *ops) { struct clk_init_data init = { NULL }; @@ -47,8 +47,7 @@ static struct clk *_register_interface(struct device *dev, const char *name, clk_hw->hw.init = &init; clk_hw->ops = ops; - clk_hw->flags = MEMMAP_ADDRESSING; - clk_hw->enable_reg = reg; + memcpy(&clk_hw->enable_reg, reg, sizeof(*reg)); clk_hw->enable_bit = bit_idx; init.name = name; @@ -58,7 +57,7 @@ static struct clk *_register_interface(struct device *dev, const char *name, init.num_parents = 1; init.parent_names = &parent_name; - clk = 
clk_register(NULL, &clk_hw->hw); + clk = ti_clk_register(NULL, &clk_hw->hw, name); if (IS_ERR(clk)) kfree(clk_hw); @@ -72,14 +71,13 @@ static struct clk *_register_interface(struct device *dev, const char *name, struct clk *ti_clk_register_interface(struct ti_clk *setup) { const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait; - u32 reg; - struct clk_omap_reg *reg_setup; + struct clk_omap_reg reg; struct ti_clk_gate *gate; gate = setup->data; - reg_setup = (struct clk_omap_reg *)® - reg_setup->index = gate->module; - reg_setup->offset = gate->reg; + reg.index = gate->module; + reg.offset = gate->reg; + reg.ptr = NULL; if (gate->flags & CLKF_NO_WAIT) ops = &clkhwops_iclk; @@ -97,7 +95,7 @@ struct clk *ti_clk_register_interface(struct ti_clk *setup) ops = &clkhwops_am35xx_ipss_wait; return _register_interface(NULL, setup->name, gate->parent, - (void __iomem *)reg, gate->bit_shift, ops); + ®, gate->bit_shift, ops); } #endif @@ -106,12 +104,11 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node, { struct clk *clk; const char *parent_name; - void __iomem *reg; + struct clk_omap_reg reg; u8 enable_bit = 0; u32 val; - reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) return; if (!of_property_read_u32(node, "ti,bit-shift", &val)) @@ -123,7 +120,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node, return; } - clk = _register_interface(NULL, node->name, parent_name, reg, + clk = _register_interface(NULL, node->name, parent_name, ®, enable_bit, ops); if (!IS_ERR(clk)) diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 44777ab6fdeb..18c267b38461 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -28,7 +28,7 @@ static u8 ti_clk_mux_get_parent(struct clk_hw *hw) { - struct clk_mux *mux = to_clk_mux(hw); + struct clk_omap_mux *mux = to_clk_omap_mux(hw); int num_parents = clk_hw_get_num_parents(hw); u32 val; @@ -39,7 +39,7 @@ static u8 ti_clk_mux_get_parent(struct clk_hw *hw) * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so * val = 0x4 really means "bit 2, index starts at bit 0" */ - val = ti_clk_ll_ops->clk_readl(mux->reg) >> mux->shift; + val = ti_clk_ll_ops->clk_readl(&mux->reg) >> mux->shift; val &= mux->mask; if (mux->table) { @@ -65,7 +65,7 @@ static u8 ti_clk_mux_get_parent(struct clk_hw *hw) static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index) { - struct clk_mux *mux = to_clk_mux(hw); + struct clk_omap_mux *mux = to_clk_omap_mux(hw); u32 val; if (mux->table) { @@ -81,11 +81,11 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index) if (mux->flags & CLK_MUX_HIWORD_MASK) { val = mux->mask << (mux->shift + 16); } else { - val = ti_clk_ll_ops->clk_readl(mux->reg); + val = ti_clk_ll_ops->clk_readl(&mux->reg); val &= ~(mux->mask << mux->shift); } val |= index << mux->shift; - ti_clk_ll_ops->clk_writel(val, mux->reg); + ti_clk_ll_ops->clk_writel(val, &mux->reg); return 0; } @@ -97,12 +97,12 @@ const struct clk_ops ti_clk_mux_ops = { }; static struct clk *_register_mux(struct device *dev, const char *name, - const char **parent_names, u8 num_parents, - unsigned long flags, void __iomem *reg, - u8 shift, u32 mask, u8 clk_mux_flags, - u32 *table) + const char * const *parent_names, + u8 num_parents, unsigned long flags, + struct clk_omap_reg *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table) { - struct clk_mux *mux; + struct clk_omap_mux *mux; struct clk *clk; struct clk_init_data init; @@ -120,14 +120,14 @@ static struct clk *_register_mux(struct 
device *dev, const char *name, init.num_parents = num_parents; /* struct clk_mux assignments */ - mux->reg = reg; + memcpy(&mux->reg, reg, sizeof(*reg)); mux->shift = shift; mux->mask = mask; mux->flags = clk_mux_flags; mux->table = table; mux->hw.init = &init; - clk = clk_register(dev, &mux->hw); + clk = ti_clk_register(dev, &mux->hw, name); if (IS_ERR(clk)) kfree(mux); @@ -140,12 +140,9 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) struct ti_clk_mux *mux; u32 flags; u8 mux_flags = 0; - struct clk_omap_reg *reg_setup; - u32 reg; + struct clk_omap_reg reg; u32 mask; - reg_setup = (struct clk_omap_reg *)® - mux = setup->data; flags = CLK_SET_RATE_NO_REPARENT; @@ -154,8 +151,9 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) mask--; mask = (1 << fls(mask)) - 1; - reg_setup->index = mux->module; - reg_setup->offset = mux->reg; + reg.index = mux->module; + reg.offset = mux->reg; + reg.ptr = NULL; if (mux->flags & CLKF_INDEX_STARTS_AT_ONE) mux_flags |= CLK_MUX_INDEX_ONE; @@ -164,7 +162,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) flags |= CLK_SET_RATE_PARENT; return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, - flags, (void __iomem *)reg, mux->bit_shift, mask, + flags, ®, mux->bit_shift, mask, mux_flags, NULL); } @@ -177,7 +175,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) static void of_mux_clk_setup(struct device_node *node) { struct clk *clk; - void __iomem *reg; + struct clk_omap_reg reg; unsigned int num_parents; const char **parent_names; u8 clk_mux_flags = 0; @@ -196,9 +194,7 @@ static void of_mux_clk_setup(struct device_node *node) of_clk_parent_fill(node, parent_names, num_parents); - reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) goto cleanup; of_property_read_u32(node, "ti,bit-shift", &shift); @@ -217,7 +213,7 @@ static void of_mux_clk_setup(struct device_node *node) mask = (1 << fls(mask)) - 1; clk = _register_mux(NULL, node->name, parent_names, num_parents, - flags, reg, shift, mask, clk_mux_flags, NULL); + flags, ®, shift, mask, clk_mux_flags, NULL); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); @@ -229,8 +225,7 @@ CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup); struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) { - struct clk_mux *mux; - struct clk_omap_reg *reg; + struct clk_omap_mux *mux; int num_parents; if (!setup) @@ -240,12 +235,10 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) if (!mux) return ERR_PTR(-ENOMEM); - reg = (struct clk_omap_reg *)&mux->reg; - mux->shift = setup->bit_shift; - reg->index = setup->module; - reg->offset = setup->reg; + mux->reg.index = setup->module; + mux->reg.offset = setup->reg; if (setup->flags & CLKF_INDEX_STARTS_AT_ONE) mux->flags |= CLK_MUX_INDEX_ONE; @@ -260,7 +253,7 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) static void __init of_ti_composite_mux_clk_setup(struct device_node *node) { - struct clk_mux *mux; + struct clk_omap_mux *mux; unsigned int num_parents; u32 val; @@ -268,9 +261,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node) if (!mux) return; - mux->reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(mux->reg)) + if (ti_clk_get_reg_addr(node, 0, &mux->reg)) goto cleanup; if (!of_property_read_u32(node, "ti,bit-shift", &val)) diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 2b60577703ef..f99abc1106f0 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ 
b/drivers/clk/x86/clk-pmc-atom.c @@ -54,6 +54,7 @@ struct clk_plt_data { struct clk_plt_fixed **parents; u8 nparents; struct clk_plt *clks[PMC_CLK_NUM]; + struct clk_lookup *mclk_lookup; }; /* Return an index in parent table */ @@ -337,6 +338,11 @@ static int plt_clk_probe(struct platform_device *pdev) goto err_unreg_clk_plt; } } + data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL); + if (!data->mclk_lookup) { + err = -ENOMEM; + goto err_unreg_clk_plt; + } plt_clk_free_parent_names_loop(parent_names, data->nparents); @@ -356,6 +362,7 @@ static int plt_clk_remove(struct platform_device *pdev) data = platform_get_drvdata(pdev); + clkdev_drop(data->mclk_lookup); plt_clk_unregister_loop(data, PMC_CLK_NUM); plt_clk_unregister_parents(data); return 0; diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 2f7c668643fe..a10962988ba8 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -94,13 +94,36 @@ static DEFINE_SPINLOCK(clk_lock); -static struct zx_pll_config pll_cpu_table[] = { +static const struct zx_pll_config pll_cpu_table[] = { PLL_RATE(1312000000, 0x00103621, 0x04aaaaaa), PLL_RATE(1407000000, 0x00103a21, 0x04aaaaaa), PLL_RATE(1503000000, 0x00103e21, 0x04aaaaaa), PLL_RATE(1600000000, 0x00104221, 0x04aaaaaa), }; +static const struct zx_pll_config pll_vga_table[] = { + PLL_RATE(36000000, 0x00102464, 0x04000000), /* 800x600@56 */ + PLL_RATE(40000000, 0x00102864, 0x04000000), /* 800x600@60 */ + PLL_RATE(49500000, 0x00103164, 0x04800000), /* 800x600@75 */ + PLL_RATE(50000000, 0x00103264, 0x04000000), /* 800x600@72 */ + PLL_RATE(56250000, 0x00103864, 0x04400000), /* 800x600@85 */ + PLL_RATE(65000000, 0x00104164, 0x04000000), /* 1024x768@60 */ + PLL_RATE(74375000, 0x00104a64, 0x04600000), /* 1280x720@60 */ + PLL_RATE(75000000, 0x00104b64, 0x04800000), /* 1024x768@70 */ + PLL_RATE(78750000, 0x00104e64, 0x04c00000), /* 1024x768@75 */ + PLL_RATE(85500000, 0x00105564, 0x04800000), /* 1360x768@60 */ + PLL_RATE(106500000, 0x00106a64, 0x04800000), /* 1440x900@60 */ + PLL_RATE(108000000, 0x00106c64, 0x04000000), /* 1280x1024@60 */ + PLL_RATE(110000000, 0x00106e64, 0x04000000), /* 1024x768@85 */ + PLL_RATE(135000000, 0x00105a44, 0x04000000), /* 1280x1024@75 */ + PLL_RATE(136750000, 0x00104462, 0x04600000), /* 1440x900@75 */ + PLL_RATE(148500000, 0x00104a62, 0x04400000), /* 1920x1080@60 */ + PLL_RATE(157000000, 0x00104e62, 0x04800000), /* 1440x900@85 */ + PLL_RATE(157500000, 0x00104e62, 0x04c00000), /* 1280x1024@85 */ + PLL_RATE(162000000, 0x00105162, 0x04000000), /* 1600x1200@60 */ + PLL_RATE(193250000, 0x00106062, 0x04a00000), /* 1920x1200@60 */ +}; + PNAME(osc) = { "osc24m", "osc32k", @@ -369,6 +392,7 @@ PNAME(wdt_ares_p) = { static struct clk_zx_pll zx296718_pll_clk[] = { ZX296718_PLL("pll_cpu", "osc24m", PLL_CPU_REG, pll_cpu_table), + ZX296718_PLL("pll_vga", "osc24m", PLL_VGA_REG, pll_vga_table), }; static struct zx_clk_fixed_factor top_ffactor_clk[] = { @@ -409,7 +433,7 @@ static struct zx_clk_fixed_factor top_ffactor_clk[] = { FFACTOR(0, "clk54m", "pll_mm1", 1, 24, 0), /* vga */ FFACTOR(0, "pll_vga_1800m", "pll_vga", 1, 1, 0), - FFACTOR(0, "clk_vga", "pll_vga", 1, 2, 0), + FFACTOR(0, "clk_vga", "pll_vga", 1, 1, CLK_SET_RATE_PARENT), /* pll ddr */ FFACTOR(0, "clk466m", "pll_ddr", 1, 2, 0), @@ -458,8 +482,8 @@ static struct zx_clk_mux top_mux_clk[] = { MUX(0, "sappu_a_mux", sappu_aclk_p, TOP_CLK_MUX5, 4, 2), MUX(0, "sappu_w_mux", sappu_wclk_p, TOP_CLK_MUX5, 8, 3), MUX(0, "vou_a_mux", vou_aclk_p, TOP_CLK_MUX7, 0, 3), - 
MUX(0, "vou_main_w_mux", vou_main_wclk_p, TOP_CLK_MUX7, 4, 3), - MUX(0, "vou_aux_w_mux", vou_aux_wclk_p, TOP_CLK_MUX7, 8, 3), + MUX_F(0, "vou_main_w_mux", vou_main_wclk_p, TOP_CLK_MUX7, 4, 3, CLK_SET_RATE_PARENT, 0), + MUX_F(0, "vou_aux_w_mux", vou_aux_wclk_p, TOP_CLK_MUX7, 8, 3, CLK_SET_RATE_PARENT, 0), MUX(0, "vou_ppu_w_mux", vou_ppu_wclk_p, TOP_CLK_MUX7, 12, 3), MUX(0, "vga_i2c_mux", vga_i2c_wclk_p, TOP_CLK_MUX7, 16, 1), MUX(0, "viu_m0_a_mux", viu_m0_aclk_p, TOP_CLK_MUX6, 0, 3), diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c index 878d879b23ff..b82031766ffa 100644 --- a/drivers/clk/zte/clk.c +++ b/drivers/clk/zte/clk.c @@ -52,7 +52,10 @@ static int hw_to_idx(struct clk_zx_pll *zx_pll) /* For matching the value in lookup table */ hw_cfg0 &= ~BIT(zx_pll->lock_bit); - hw_cfg0 |= BIT(zx_pll->pd_bit); + + /* Check availability of pd_bit */ + if (zx_pll->pd_bit < 32) + hw_cfg0 |= BIT(zx_pll->pd_bit); for (i = 0; i < zx_pll->count; i++) { if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1) @@ -108,6 +111,10 @@ static int zx_pll_enable(struct clk_hw *hw) struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); u32 reg; + /* If pd_bit is not available, simply return success. */ + if (zx_pll->pd_bit > 31) + return 0; + reg = readl_relaxed(zx_pll->reg_base); writel_relaxed(reg & ~BIT(zx_pll->pd_bit), zx_pll->reg_base); @@ -120,6 +127,9 @@ static void zx_pll_disable(struct clk_hw *hw) struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); u32 reg; + if (zx_pll->pd_bit > 31) + return; + reg = readl_relaxed(zx_pll->reg_base); writel_relaxed(reg | BIT(zx_pll->pd_bit), zx_pll->reg_base); } diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h index 84a55a3e2bd4..4df0f121b56d 100644 --- a/drivers/clk/zte/clk.h +++ b/drivers/clk/zte/clk.h @@ -66,8 +66,12 @@ struct clk_zx_pll { CLK_GET_RATE_NOCACHE), \ } +/* + * The pd_bit is not available on ZX296718, so let's pass something + * bigger than 31, e.g. 0xff, to indicate that. 
+ */ #define ZX296718_PLL(_name, _parent, _reg, _table) \ -ZX_PLL(_name, _parent, _reg, _table, 0, 30) +ZX_PLL(_name, _parent, _reg, _table, 0xff, 30) struct zx_clk_gate { struct clk_gate gate; diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index 9a7e37cf56b0..a1df588343f2 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -22,7 +22,7 @@ #define DRV_NAME "cs5535-clockevt" static int timer_irq; -module_param_named(irq, timer_irq, int, 0644); +module_param_hw_named(irq, timer_irq, int, irq, 0644); MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); /* diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 770a9ae1999a..37b30071c220 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -378,7 +378,7 @@ static void __exit speedstep_exit(void) cpufreq_unregister_driver(&speedstep_driver); } -module_param(smi_port, int, 0444); +module_param_hw(smi_port, int, ioport, 0444); module_param(smi_cmd, int, 0444); module_param(smi_sig, uint, 0444); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 548b90be7685..2706be7ed334 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -111,7 +111,8 @@ void cpuidle_use_deepest_state(bool enable) preempt_disable(); dev = cpuidle_get_device(); - dev->use_deepest_state = enable; + if (dev) + dev->use_deepest_state = enable; preempt_enable(); } diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 21472e427f6f..a111cd72797b 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -119,8 +119,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) names[i] = vi->data_vq[i].name; } - ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names, NULL); + ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); if (ret) goto err_find; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d01d59812cf3..24e8597b2c3e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -514,12 +514,12 @@ config TIMB_DMA Enable support for the Timberdale FPGA DMA engine. config TI_CPPI41 - tristate "AM33xx CPPI41 DMA support" - depends on ARCH_OMAP + tristate "CPPI 4.1 DMA support" + depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX) select DMA_ENGINE help The Communications Port Programming Interface (CPPI) 4.1 DMA engine - is currently used by the USB driver on AM335x platforms. + is currently used by the USB driver on AM335x and DA8xx platforms. config TI_DMA_CROSSBAR bool @@ -608,6 +608,7 @@ config ASYNC_TX_DMA config DMATEST tristate "DMA Test client" depends on DMA_ENGINE + select DMA_ENGINE_RAID help Simple DMA test client. Say N unless you're debugging a DMA Device driver. diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 0b7c6ce629a6..6bb8813ca275 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -106,6 +106,7 @@ struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives + * @config_offset: offset to the configuration register * @channels: the number of channels available in this variant * @signals: the number of request signals available from the hardware * @dualmaster: whether this version supports dual AHB masters or not. 
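The cs5535-clockevt and speedstep-smi hunks above switch hardware-resource parameters to the module_param_hw*() variants, which record the resource type (irq, ioport, ...) alongside the usual C type and permissions. A hedged sketch of the pattern for a hypothetical driver's parameters:

	#include <linux/moduleparam.h>

	static int example_irq = -1;
	module_param_hw_named(irq, example_irq, int, irq, 0444);
	MODULE_PARM_DESC(irq, "IRQ line for the (hypothetical) example device");

	static int example_port;
	module_param_hw(example_port, int, ioport, 0444);
	MODULE_PARM_DESC(example_port, "I/O port base for the (hypothetical) example device");
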
@@ -145,6 +146,8 @@ struct pl08x_bus_data { /** * struct pl08x_phy_chan - holder for the physical channels * @id: physical index to this channel + * @base: memory base address for this physical channel + * @reg_config: configuration address for this physical channel * @lock: a lock to use when altering an instance of this struct * @serving: the virtual channel currently being served by this physical * channel @@ -203,7 +206,7 @@ struct pl08x_txd { }; /** - * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel * states * @PL08X_CHAN_IDLE: the channel is idle * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport @@ -226,9 +229,8 @@ enum pl08x_dma_chan_state { * @phychan: the physical channel utilized by this channel, if there is one * @name: name of channel * @cd: channel platform data - * @runtime_addr: address for RX/TX according to the runtime config + * @cfg: slave configuration * @at: active transaction on this channel - * @lock: a lock for this channel data * @host: a pointer to the host (internal use) * @state: whether the channel is idle, paused, running etc * @slave: whether this channel is a device (slave) or for memcpy @@ -262,7 +264,7 @@ struct pl08x_dma_chan { * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. - * @lock: a spinlock for this struct + * @lli_words: how many words are used in each LLI item for this variant */ struct pl08x_driver_data { struct dma_device slave; @@ -417,7 +419,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) /* Enable the DMA channel */ /* Do not access config register until channel shows as disabled */ - while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) + while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id)) cpu_relax(); /* Do not access config register until channel shows as inactive */ @@ -484,8 +486,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, writel(val, ch->reg_config); - writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); - writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); + writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR); + writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); } static inline u32 get_bytes_in_cctl(u32 cctl) @@ -1834,7 +1836,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) return IRQ_NONE; for (i = 0; i < pl08x->vd->channels; i++) { - if (((1 << i) & err) || ((1 << i) & tc)) { + if ((BIT(i) & err) || (BIT(i) & tc)) { /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; @@ -1872,7 +1874,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) } spin_unlock(&plchan->vc.lock); - mask |= (1 << i); + mask |= BIT(i); } } diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index d74cee077842..f7e965f63274 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -68,7 +68,6 @@ #define QMGR_MEMCTRL_IDX_SH 16 #define QMGR_MEMCTRL_DESC_SH 8 -#define QMGR_NUM_PEND 5 #define QMGR_PEND(x) (0x90 + (x) * 4) #define QMGR_PENDING_SLOT_Q(x) (x / 32) @@ -131,7 +130,6 @@ struct cppi41_dd { u32 first_td_desc; struct cppi41_channel *chan_busy[ALLOC_DECS_NUM]; - void __iomem *usbss_mem; void __iomem *ctrl_mem; void __iomem *sched_mem; void __iomem *qmgr_mem; @@ -139,6 +137,10 @@ struct cppi41_dd { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues 
td_queue; + u16 first_completion_queue; + u16 qmgr_num_pend; + u32 n_chans; + u8 platform; struct list_head pending; /* Pending queued transfers */ spinlock_t lock; /* Lock for pending list */ @@ -149,8 +151,7 @@ struct cppi41_dd { bool is_suspended; }; -#define FIST_COMPLETION_QUEUE 93 -static struct chan_queues usb_queues_tx[] = { +static struct chan_queues am335x_usb_queues_tx[] = { /* USB0 ENDP 1 */ [ 0] = { .submit = 32, .complete = 93}, [ 1] = { .submit = 34, .complete = 94}, @@ -186,7 +187,7 @@ static struct chan_queues usb_queues_tx[] = { [29] = { .submit = 90, .complete = 139}, }; -static const struct chan_queues usb_queues_rx[] = { +static const struct chan_queues am335x_usb_queues_rx[] = { /* USB0 ENDP 1 */ [ 0] = { .submit = 1, .complete = 109}, [ 1] = { .submit = 2, .complete = 110}, @@ -222,11 +223,26 @@ static const struct chan_queues usb_queues_rx[] = { [29] = { .submit = 30, .complete = 155}, }; +static const struct chan_queues da8xx_usb_queues_tx[] = { + [0] = { .submit = 16, .complete = 24}, + [1] = { .submit = 18, .complete = 24}, + [2] = { .submit = 20, .complete = 24}, + [3] = { .submit = 22, .complete = 24}, +}; + +static const struct chan_queues da8xx_usb_queues_rx[] = { + [0] = { .submit = 1, .complete = 26}, + [1] = { .submit = 3, .complete = 26}, + [2] = { .submit = 5, .complete = 26}, + [3] = { .submit = 7, .complete = 26}, +}; + struct cppi_glue_infos { - irqreturn_t (*isr)(int irq, void *data); const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; + u16 first_completion_queue; + u16 qmgr_num_pend; }; static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c) @@ -285,19 +301,21 @@ static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; + u16 first_completion_queue = cdd->first_completion_queue; + u16 qmgr_num_pend = cdd->qmgr_num_pend; struct cppi41_channel *c; int i; - for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND; + for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend; i++) { u32 val; u32 q_num; val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)); - if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) { + if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) { u32 mask; /* set corresponding bit for completetion Q 93 */ - mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE); + mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue); /* not set all bits for queues less than Q 93 */ mask--; /* now invert and keep only Q 93+ set */ @@ -402,11 +420,9 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, struct cppi41_channel *c = to_cpp41_chan(chan); enum dma_status ret; - /* lock */ ret = dma_cookie_status(chan, cookie, txstate); - if (txstate && ret == DMA_COMPLETE) - txstate->residue = c->residue; - /* unlock */ + + dma_set_residue(txstate, c->residue); return ret; } @@ -630,7 +646,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) if (!c->is_tx) { reg |= GCR_STARV_RETRY; reg |= GCR_DESC_TYPE_HOST; - reg |= c->q_comp_num; + reg |= cdd->td_queue.complete; } reg |= GCR_TEARDOWN; cppi_writel(reg, c->gcr_reg); @@ -641,7 +657,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) if (!c->td_seen || !c->td_desc_seen) { desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); - if (!desc_phys) + if (!desc_phys && c->is_tx) desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); if (desc_phys == c->desc_phys) { @@ -723,39 +739,24 @@ 
static int cppi41_stop_chan(struct dma_chan *chan) return 0; } -static void cleanup_chans(struct cppi41_dd *cdd) -{ - while (!list_empty(&cdd->ddev.channels)) { - struct cppi41_channel *cchan; - - cchan = list_first_entry(&cdd->ddev.channels, - struct cppi41_channel, chan.device_node); - list_del(&cchan->chan.device_node); - kfree(cchan); - } -} - static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { - struct cppi41_channel *cchan; + struct cppi41_channel *cchan, *chans; int i; - int ret; - u32 n_chans; + u32 n_chans = cdd->n_chans; - ret = of_property_read_u32(dev->of_node, "#dma-channels", - &n_chans); - if (ret) - return ret; /* * The channels can only be used as TX or as RX. So we add twice * that much dma channels because USB can only do RX or TX. */ n_chans *= 2; + chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL); + if (!chans) + return -ENOMEM; + for (i = 0; i < n_chans; i++) { - cchan = kzalloc(sizeof(*cchan), GFP_KERNEL); - if (!cchan) - goto err; + cchan = &chans[i]; cchan->cdd = cdd; if (i & 1) { @@ -775,9 +776,6 @@ static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) cdd->first_td_desc = n_chans; return 0; -err: - cleanup_chans(cdd); - return -ENOMEM; } static void purge_descs(struct device *dev, struct cppi41_dd *cdd) @@ -859,7 +857,7 @@ static void init_sched(struct cppi41_dd *cdd) word = 0; cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); - for (ch = 0; ch < 15 * 2; ch += 2) { + for (ch = 0; ch < cdd->n_chans; ch += 2) { reg = SCHED_ENTRY0_CHAN(ch); reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX; @@ -869,7 +867,7 @@ static void init_sched(struct cppi41_dd *cdd) cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word)); word++; } - reg = 15 * 2 * 2 - 1; + reg = cdd->n_chans * 2 - 1; reg |= DMA_SCHED_CTRL_EN; cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } @@ -885,7 +883,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) return -ENOMEM; cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); - cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); ret = init_descs(dev, cdd); @@ -894,6 +892,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ); init_sched(cdd); + return 0; err_td: deinit_cppi41(dev, cdd); @@ -933,8 +932,9 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param) else queues = cdd->queues_rx; - BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx)); - if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx))) + BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) != + ARRAY_SIZE(am335x_usb_queues_tx)); + if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx))) return false; cchan->q_num = queues[cchan->port_num].submit; @@ -962,15 +962,25 @@ static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec, &dma_spec->args[0]); } -static const struct cppi_glue_infos usb_infos = { - .isr = cppi41_irq, - .queues_rx = usb_queues_rx, - .queues_tx = usb_queues_tx, +static const struct cppi_glue_infos am335x_usb_infos = { + .queues_rx = am335x_usb_queues_rx, + .queues_tx = am335x_usb_queues_tx, .td_queue = { .submit = 31, .complete = 0 }, + .first_completion_queue = 93, + .qmgr_num_pend = 5, +}; + +static const struct cppi_glue_infos da8xx_usb_infos = { + .queues_rx = da8xx_usb_queues_rx, + .queues_tx = da8xx_usb_queues_tx, + .td_queue = { .submit = 31, 
.complete = 0 }, + .first_completion_queue = 24, + .qmgr_num_pend = 2, }; static const struct of_device_id cppi41_dma_ids[] = { - { .compatible = "ti,am3359-cppi41", .data = &usb_infos}, + { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos}, + { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos}, {}, }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); @@ -995,6 +1005,8 @@ static int cppi41_dma_probe(struct platform_device *pdev) struct cppi41_dd *cdd; struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; + struct resource *mem; + int index; int irq; int ret; @@ -1021,19 +1033,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; - cdd->usbss_mem = of_iomap(dev->of_node, 0); - cdd->ctrl_mem = of_iomap(dev->of_node, 1); - cdd->sched_mem = of_iomap(dev->of_node, 2); - cdd->qmgr_mem = of_iomap(dev->of_node, 3); + index = of_property_match_string(dev->of_node, + "reg-names", "controller"); + if (index < 0) + return index; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index); + cdd->ctrl_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->ctrl_mem)) + return PTR_ERR(cdd->ctrl_mem); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); + cdd->sched_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->sched_mem)) + return PTR_ERR(cdd->sched_mem); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2); + cdd->qmgr_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->qmgr_mem)) + return PTR_ERR(cdd->qmgr_mem); + spin_lock_init(&cdd->lock); INIT_LIST_HEAD(&cdd->pending); platform_set_drvdata(pdev, cdd); - if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || - !cdd->qmgr_mem) - return -ENXIO; - pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 100); pm_runtime_use_autosuspend(dev); @@ -1044,6 +1068,13 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; + cdd->qmgr_num_pend = glue_info->qmgr_num_pend; + cdd->first_completion_queue = glue_info->first_completion_queue; + + ret = of_property_read_u32(dev->of_node, + "#dma-channels", &cdd->n_chans); + if (ret) + goto err_get_n_chans; ret = init_cppi41(dev, cdd); if (ret) @@ -1056,18 +1087,18 @@ static int cppi41_dma_probe(struct platform_device *pdev) irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { ret = -EINVAL; - goto err_irq; + goto err_chans; } - ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED, + ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED, dev_name(dev), cdd); if (ret) - goto err_irq; + goto err_chans; cdd->irq = irq; ret = dma_async_device_register(&cdd->ddev); if (ret) - goto err_dma_reg; + goto err_chans; ret = of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); @@ -1080,20 +1111,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) return 0; err_of: dma_async_device_unregister(&cdd->ddev); -err_dma_reg: -err_irq: - cleanup_chans(cdd); err_chans: deinit_cppi41(dev, cdd); err_init_cppi: pm_runtime_dont_use_autosuspend(dev); +err_get_n_chans: err_get_sync: pm_runtime_put_sync(dev); pm_runtime_disable(dev); - iounmap(cdd->usbss_mem); - iounmap(cdd->ctrl_mem); - iounmap(cdd->sched_mem); - iounmap(cdd->qmgr_mem); return ret; } @@ -1110,12 +1135,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&cdd->ddev); 
devm_free_irq(&pdev->dev, cdd->irq, cdd); - cleanup_chans(cdd); deinit_cppi41(&pdev->dev, cdd); - iounmap(cdd->usbss_mem); - iounmap(cdd->ctrl_mem); - iounmap(cdd->sched_mem); - iounmap(cdd->qmgr_mem); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 54d581d407aa..a07ef3d6b3ec 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -535,6 +535,13 @@ static int dmatest_func(void *data) total_tests++; + /* Check if buffer count fits into map count variable (u8) */ + if ((src_cnt + dst_cnt) >= 255) { + pr_err("too many buffers (%d of 255 supported)\n", + src_cnt + dst_cnt); + break; + } + if (1 << align > params->buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", params->buf_size, 1 << align); @@ -585,7 +592,7 @@ static int dmatest_func(void *data) for (i = 0; i < src_cnt; i++) { void *buf = thread->srcs[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; + unsigned long pg_off = offset_in_page(buf); um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE); @@ -605,7 +612,7 @@ static int dmatest_func(void *data) for (i = 0; i < dst_cnt; i++) { void *buf = thread->dsts[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; + unsigned long pg_off = offset_in_page(buf); dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_BIDIRECTIONAL); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d1651a50c349..085993cb2ccc 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan) return 0; } +static int sdma_disable_channel_with_delay(struct dma_chan *chan) +{ + sdma_disable_channel(chan); + + /* + * According to NXP R&D team a delay of one BD SDMA cost time + * (maximum is 1ms) should be added after disable of the channel + * bit, to ensure SDMA core has really been stopped after SDMA + * clients call .device_terminate_all. 
+ */ + mdelay(1); + + return 0; +} + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) { struct sdma_engine *sdma = sdmac->sdma; @@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel; + sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; sdma->dma_device.device_issue_pending = sdma_issue_pending; sdma->dma_device.dev->dma_parms = &sdma->dma_parms; dma_set_max_seg_size(sdma->dma_device.dev, 65535); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index cc5259b881d4..6ad4384b3fa8 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma, dma_cookie_init(&ioat_chan->dma_chan); list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); ioat_dma->idx[idx] = ioat_chan; - init_timer(&ioat_chan->timer); - ioat_chan->timer.function = ioat_timer_event; - ioat_chan->timer.data = data; + setup_timer(&ioat_chan->timer, ioat_timer_event, data); tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 0cb951b743a6..25bc5b103aa2 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -960,7 +960,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) } src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), - (size_t)src & ~PAGE_MASK, PAGE_SIZE, + offset_in_page(src), PAGE_SIZE, DMA_TO_DEVICE); unmap->addr[0] = src_dma; @@ -972,7 +972,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) unmap->to_cnt = 1; dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), - (size_t)dest & ~PAGE_MASK, PAGE_SIZE, + offset_in_page(dest), PAGE_SIZE, DMA_FROM_DEVICE); unmap->addr[1] = dest_dma; @@ -1580,11 +1580,6 @@ static int mv_xor_probe(struct platform_device *pdev) int irq; cd = &pdata->channels[i]; - if (!cd) { - ret = -ENODEV; - goto err_channel_add; - } - irq = platform_get_irq(pdev, i); if (irq < 0) { ret = irq; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f37f4978dabb..8b0da7fa520d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -22,7 +22,6 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/amba/bus.h> -#include <linux/amba/pl330.h> #include <linux/scatterlist.h> #include <linux/of.h> #include <linux/of_dma.h> @@ -2077,18 +2076,6 @@ static void pl330_tasklet(unsigned long data) } } -bool pl330_filter(struct dma_chan *chan, void *param) -{ - u8 *peri_id; - - if (chan->device->dev->driver != &pl330_driver.drv) - return false; - - peri_id = chan->private; - return *peri_id == (unsigned long)param; -} -EXPORT_SYMBOL(pl330_filter); - static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -2833,7 +2820,6 @@ static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); static int pl330_probe(struct amba_device *adev, const struct amba_id *id) 
{ - struct dma_pl330_platdata *pdat; struct pl330_config *pcfg; struct pl330_dmac *pl330; struct dma_pl330_chan *pch, *_p; @@ -2843,8 +2829,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) int num_chan; struct device_node *np = adev->dev.of_node; - pdat = dev_get_platdata(&adev->dev); - ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2857,7 +2841,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd = &pl330->ddma; pd->dev = &adev->dev; - pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; + pl330->mcbufsz = 0; /* get quirk */ for (i = 0; i < ARRAY_SIZE(of_quirks); i++) @@ -2901,10 +2885,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ - if (pdat) - num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); - else - num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); + num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); pl330->num_peripherals = num_chan; @@ -2916,11 +2897,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) for (i = 0; i < num_chan; i++) { pch = &pl330->peripherals[i]; - if (!adev->dev.of_node) - pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; - else - pch->chan.private = adev->dev.of_node; + pch->chan.private = adev->dev.of_node; INIT_LIST_HEAD(&pch->submitted_list); INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); @@ -2933,15 +2911,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) list_add_tail(&pch->chan.device_node, &pd->channels); } - if (pdat) { - pd->cap_mask = pdat->cap_mask; - } else { - dma_cap_set(DMA_MEMCPY, pd->cap_mask); - if (pcfg->num_peri) { - dma_cap_set(DMA_SLAVE, pd->cap_mask); - dma_cap_set(DMA_CYCLIC, pd->cap_mask); - dma_cap_set(DMA_PRIVATE, pd->cap_mask); - } + dma_cap_set(DMA_MEMCPY, pd->cap_mask); + if (pcfg->num_peri) { + dma_cap_set(DMA_SLAVE, pd->cap_mask); + dma_cap_set(DMA_CYCLIC, pd->cap_mask); + dma_cap_set(DMA_PRIVATE, pd->cap_mask); } pd->device_alloc_chan_resources = pl330_alloc_chan_resources; diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 3c982c96b4b7..5072a7d306d4 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -865,6 +865,20 @@ bailout: return rc; } +static void hidma_shutdown(struct platform_device *pdev) +{ + struct hidma_dev *dmadev = platform_get_drvdata(pdev); + + dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n"); + + pm_runtime_get_sync(dmadev->ddev.dev); + if (hidma_ll_disable(dmadev->lldev)) + dev_warn(dmadev->ddev.dev, "channel did not stop\n"); + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + +} + static int hidma_remove(struct platform_device *pdev) { struct hidma_dev *dmadev = platform_get_drvdata(pdev); @@ -908,6 +922,7 @@ MODULE_DEVICE_TABLE(of, hidma_match); static struct platform_driver hidma_driver = { .probe = hidma_probe, .remove = hidma_remove, + .shutdown = hidma_shutdown, .driver = { .name = "hidma", .of_match_table = hidma_match, diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 6645bdf0d151..1530a661518d 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -499,6 +499,9 @@ int hidma_ll_enable(struct hidma_lldev *lldev) lldev->trch_state = HIDMA_CH_ENABLED; lldev->evch_state = HIDMA_CH_ENABLED; + /* enable irqs */ + writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + return 0; } @@ -596,6 +599,9 @@ int hidma_ll_disable(struct 
hidma_lldev *lldev) lldev->trch_state = HIDMA_CH_SUSPENDED; lldev->evch_state = HIDMA_CH_SUSPENDED; + + /* disable interrupts */ + writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); return 0; } diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 48b22d5c8602..db41795fe42a 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); if (desc->hwdescs.use) { - struct rcar_dmac_xfer_chunk *chunk; + struct rcar_dmac_xfer_chunk *chunk = + list_first_entry(&desc->chunks, + struct rcar_dmac_xfer_chunk, node); dev_dbg(chan->chan.device->dev, "chan%u: queue desc %p: %u@%pad\n", chan->index, desc, desc->nchunks, &desc->hwdescs.dma); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, + chunk->src_addr >> 32); + rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, + chunk->dst_addr >> 32); rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE, desc->hwdescs.dma >> 32); #endif @@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) * should. Initialize it manually with the destination address * of the first chunk. */ - chunk = list_first_entry(&desc->chunks, - struct rcar_dmac_xfer_chunk, node); rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff); @@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, unsigned int nchunks = 0; unsigned int max_chunk_size; unsigned int full_size = 0; - bool highmem = false; + bool cross_boundary = false; unsigned int i; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + u32 high_dev_addr; + u32 high_mem_addr; +#endif desc = rcar_dmac_desc_get(chan); if (!desc) @@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, full_size += len; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (i == 0) { + high_dev_addr = dev_addr >> 32; + high_mem_addr = mem_addr >> 32; + } + + if ((dev_addr >> 32 != high_dev_addr) || + (mem_addr >> 32 != high_mem_addr)) + cross_boundary = true; +#endif while (len) { unsigned int size = min(len, max_chunk_size); @@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, * Prevent individual transfers from crossing 4GB * boundaries. */ - if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) + if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; - if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) + cross_boundary = true; + } + if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; - - /* - * Check if either of the source or destination address - * can't be expressed in 32 bits. If so we can't use - * hardware descriptor lists. - */ - if (dev_addr >> 32 || mem_addr >> 32) - highmem = true; + cross_boundary = true; + } #endif chunk = rcar_dmac_xfer_chunk_get(chan); @@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, * Use hardware descriptor lists if possible when more than one chunk * needs to be transferred (otherwise they don't make much sense). * - * The highmem check currently covers the whole transfer. As an - * optimization we could use descriptor lists for consecutive lowmem - * chunks and direct manual mode for highmem chunks. Whether the - * performance improvement would be significant enough compared to the - * additional complexity remains to be investigated. 
+ * Source/Destination address should be located in same 4GiB region + * in the 40bit address space when it uses Hardware descriptor, + * and cross_boundary is checking it. */ - desc->hwdescs.use = !highmem && nchunks > 1; + desc->hwdescs.use = !cross_boundary && nchunks > 1; if (desc->hwdescs.use) { if (rcar_dmac_fill_hwdesc(chan, desc) < 0) desc->hwdescs.use = false; diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 49f86cabcfec..786fc8fcc38e 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1008,7 +1008,7 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, c = dma_get_slave_channel(&chan->vchan.chan); if (!c) { - dev_err(dev, "No more channel avalaible\n"); + dev_err(dev, "No more channels available\n"); return NULL; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index 57aa227bfadb..f4ed3f17607c 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -238,7 +238,7 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv, } spin_lock_irqsave(&priv->lock, flags); - for_each_clear_bit_from(i, &priv->pchans_used, max) { + for_each_clear_bit_from(i, priv->pchans_used, max) { pchan = &pchans[i]; pchan->vchan = vchan; set_bit(i, priv->pchans_used); diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index e47fc9b0944f..545e97279083 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(vchan_find_desc); static void vchan_complete(unsigned long arg) { struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; - struct virt_dma_desc *vd; + struct virt_dma_desc *vd, *_vd; struct dmaengine_desc_callback cb; LIST_HEAD(head); @@ -103,8 +103,7 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_callback_invoke(&cb, NULL); - while (!list_empty(&head)) { - vd = list_first_entry(&head, struct virt_dma_desc, node); + list_for_each_entry_safe(vd, _vd, &head, node) { dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); @@ -119,9 +118,9 @@ static void vchan_complete(unsigned long arg) void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) { - while (!list_empty(head)) { - struct virt_dma_desc *vd = list_first_entry(head, - struct virt_dma_desc, node); + struct virt_dma_desc *vd, *_vd; + + list_for_each_entry_safe(vd, _vd, head, node) { if (dmaengine_desc_test_reuse(&vd->tx)) { list_move_tail(&vd->node, &vc->desc_allocated); } else { diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8288fe4d17c3..8cf87b1a284b 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -331,6 +331,7 @@ struct xilinx_dma_tx_descriptor { * @seg_v: Statically allocated segments base * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @start_transfer: Differentiate b/w DMA IP's transfer + * @stop_transfer: Differentiate b/w DMA IP's quiesce */ struct xilinx_dma_chan { struct xilinx_dma_device *xdev; @@ -361,6 +362,7 @@ struct xilinx_dma_chan { struct xilinx_axidma_tx_segment *seg_v; struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); + int (*stop_transfer)(struct xilinx_dma_chan *chan); u16 tdest; }; @@ -946,26 +948,32 @@ static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) } /** - * xilinx_dma_halt - Halt DMA channel + * xilinx_dma_stop_transfer - Halt DMA channel * @chan: Driver specific DMA channel */ -static void xilinx_dma_halt(struct xilinx_dma_chan *chan) +static int 
xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) { - int err; u32 val; dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); /* Wait for the hardware to halt */ - err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, - (val & XILINX_DMA_DMASR_HALTED), 0, - XILINX_DMA_LOOP_COUNT); + return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + val & XILINX_DMA_DMASR_HALTED, 0, + XILINX_DMA_LOOP_COUNT); +} - if (err) { - dev_err(chan->dev, "Cannot stop channel %p: %x\n", - chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); - chan->err = true; - } +/** + * xilinx_cdma_stop_transfer - Wait for the current transfer to complete + * @chan: Driver specific DMA channel + */ +static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) +{ + u32 val; + + return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + val & XILINX_DMA_DMASR_IDLE, 0, + XILINX_DMA_LOOP_COUNT); } /** @@ -1653,7 +1661,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_cdma_tx_segment *segment, *prev; + struct xilinx_cdma_tx_segment *segment; struct xilinx_cdma_desc_hw *hw; if (!len || len > XILINX_DMA_MAX_TRANS_LEN) @@ -1680,21 +1688,11 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, hw->dest_addr_msb = upper_32_bits(dma_dst); } - /* Fill the previous next descriptor with current */ - prev = list_last_entry(&desc->segments, - struct xilinx_cdma_tx_segment, node); - prev->hw.next_desc = segment->phys; - /* Insert the segment into the descriptor segments list. */ list_add_tail(&segment->node, &desc->segments); - prev = segment; - - /* Link the last hardware descriptor with the first. */ - segment = list_first_entry(&desc->segments, - struct xilinx_cdma_tx_segment, node); desc->async_tx.phys = segment->phys; - prev->hw.next_desc = segment->phys; + hw->next_desc = segment->phys; return &desc->async_tx; @@ -2003,12 +2001,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); u32 reg; + int err; if (chan->cyclic) xilinx_dma_chan_reset(chan); - /* Halt the DMA engine */ - xilinx_dma_halt(chan); + err = chan->stop_transfer(chan); + if (err) { + dev_err(chan->dev, "Cannot stop channel %p: %x\n", + chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); + chan->err = true; + } /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); @@ -2397,12 +2400,16 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, return err; } - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { chan->start_transfer = xilinx_dma_start_transfer; - else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) + chan->stop_transfer = xilinx_dma_stop_transfer; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->start_transfer = xilinx_cdma_start_transfer; - else + chan->stop_transfer = xilinx_cdma_stop_transfer; + } else { chan->start_transfer = xilinx_vdma_start_transfer; + chan->stop_transfer = xilinx_dma_stop_transfer; + } /* Initialize the tasklet */ tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c index 61b50c40b87b..598e209efa2d 100644 --- a/drivers/gpio/gpio-104-dio-48e.c +++ b/drivers/gpio/gpio-104-dio-48e.c @@ -33,11 +33,11 @@ static unsigned int base[MAX_NUM_DIO48E]; static unsigned int num_dio48e; 
-module_param_array(base, uint, &num_dio48e, 0); +module_param_hw_array(base, uint, ioport, &num_dio48e, 0); MODULE_PARM_DESC(base, "ACCES 104-DIO-48E base addresses"); static unsigned int irq[MAX_NUM_DIO48E]; -module_param_array(irq, uint, NULL, 0); +module_param_hw_array(irq, uint, irq, NULL, 0); MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers"); /** diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c index 337c048168d8..51f046e29ff7 100644 --- a/drivers/gpio/gpio-104-idi-48.c +++ b/drivers/gpio/gpio-104-idi-48.c @@ -33,11 +33,11 @@ static unsigned int base[MAX_NUM_IDI_48]; static unsigned int num_idi_48; -module_param_array(base, uint, &num_idi_48, 0); +module_param_hw_array(base, uint, ioport, &num_idi_48, 0); MODULE_PARM_DESC(base, "ACCES 104-IDI-48 base addresses"); static unsigned int irq[MAX_NUM_IDI_48]; -module_param_array(irq, uint, NULL, 0); +module_param_hw_array(irq, uint, irq, NULL, 0); MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers"); /** diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c index 5281e1cedb01..ec2ce34ff473 100644 --- a/drivers/gpio/gpio-104-idio-16.c +++ b/drivers/gpio/gpio-104-idio-16.c @@ -33,11 +33,11 @@ static unsigned int base[MAX_NUM_IDIO_16]; static unsigned int num_idio_16; -module_param_array(base, uint, &num_idio_16, 0); +module_param_hw_array(base, uint, ioport, &num_idio_16, 0); MODULE_PARM_DESC(base, "ACCES 104-IDIO-16 base addresses"); static unsigned int irq[MAX_NUM_IDIO_16]; -module_param_array(irq, uint, NULL, 0); +module_param_hw_array(irq, uint, irq, NULL, 0); MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers"); /** diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c index fa4baa2543db..11ade5b288f8 100644 --- a/drivers/gpio/gpio-gpio-mm.c +++ b/drivers/gpio/gpio-gpio-mm.c @@ -31,7 +31,7 @@ static unsigned int base[MAX_NUM_GPIOMM]; static unsigned int num_gpiomm; -module_param_array(base, uint, &num_gpiomm, 0); +module_param_hw_array(base, uint, ioport, &num_gpiomm, 0); MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses"); /** diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c index 87d63695dfcf..5037974ac063 100644 --- a/drivers/gpio/gpio-ws16c48.c +++ b/drivers/gpio/gpio-ws16c48.c @@ -30,11 +30,11 @@ static unsigned int base[MAX_NUM_WS16C48]; static unsigned int num_ws16c48; -module_param_array(base, uint, &num_ws16c48, 0); +module_param_hw_array(base, uint, ioport, &num_ws16c48, 0); MODULE_PARM_DESC(base, "WinSystems WS16C48 base addresses"); static unsigned int irq[MAX_NUM_WS16C48]; -module_param_array(irq, uint, NULL, 0); +module_param_hw_array(irq, uint, irq, NULL, 0); MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers"); /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 532a577ff7a1..b6ac3df18b58 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4789,7 +4789,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_DESTROY_BY_RCU); + SLAB_TYPESAFE_BY_RCU); if (!dev_priv->requests) goto err_vmas; diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index a211c53c813f..129c58bb4805 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -521,7 +521,7 @@ static inline struct drm_i915_gem_request * 
__i915_gem_active_get_rcu(const struct i915_gem_active *active) { /* Performing a lockless retrieval of the active request is super - * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing + * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing * slab of request objects will not be freed whilst we hold the * RCU read lock. It does not guarantee that the request itself * will not be freed and then *reused*. Viz, diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 6a8258eacdcb..9f24c5da3f8d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -174,7 +174,7 @@ struct drm_i915_private *mock_gem_device(void) i915->requests = KMEM_CACHE(mock_request, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_DESTROY_BY_RCU); + SLAB_TYPESAFE_BY_RCU); if (!i915->requests) goto err_vmas; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 491866865c33..1e1c90b30d4a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -175,8 +175,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif - ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, - callbacks, names, NULL); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); goto err_vqs; diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index 45c5c4883022..6e6bf46bcb52 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -119,7 +119,7 @@ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. 
*/ static u16 force_addr; -module_param(force_addr, ushort, 0); +module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 5a4eb6b6bd92..f2acd4b6bf01 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -157,6 +157,8 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "AMDI0010", ACCESS_INTR_MASK }, { "AMDI0510", 0 }, { "APMC0D0F", 0 }, + { "HISI02A1", 0 }, + { "HISI02A2", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c index 8af62fb3fe41..5416003e0605 100644 --- a/drivers/i2c/busses/i2c-elektor.c +++ b/drivers/i2c/busses/i2c-elektor.c @@ -323,9 +323,9 @@ MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>"); MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 ISA bus adapter"); MODULE_LICENSE("GPL"); -module_param(base, int, 0); -module_param(irq, int, 0); +module_param_hw(base, int, ioport_or_iomem, 0); +module_param_hw(irq, int, irq, 0); module_param(clock, int, 0); module_param(own, int, 0); -module_param(mmapped, int, 0); +module_param_hw(mmapped, int, other, 0); module_isa_driver(i2c_elektor_driver, 1); diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c index 1bcdd10b68b9..faa8fb8f2b8f 100644 --- a/drivers/i2c/busses/i2c-parport-light.c +++ b/drivers/i2c/busses/i2c-parport-light.c @@ -38,11 +38,11 @@ static struct platform_device *pdev; static u16 base; -module_param(base, ushort, 0); +module_param_hw(base, ushort, ioport, 0); MODULE_PARM_DESC(base, "Base I/O address"); static int irq; -module_param(irq, int, 0); +module_param_hw(irq, int, irq, 0); MODULE_PARM_DESC(irq, "IRQ (optional)"); /* ----- Low-level parallel port access ----------------------------------- */ diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index ba88f17f636c..946ac646de2a 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -197,9 +197,9 @@ MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); MODULE_DESCRIPTION("ISA base PCA9564/PCA9665 driver"); MODULE_LICENSE("GPL"); -module_param(base, ulong, 0); +module_param_hw(base, ulong, ioport, 0); MODULE_PARM_DESC(base, "I/O base address"); -module_param(irq, int, 0); +module_param_hw(irq, int, irq, 0); MODULE_PARM_DESC(irq, "IRQ"); module_param(clock, int, 0); MODULE_PARM_DESC(clock, "Clock rate in hertz.\n\t\t" diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index c21ca7bf2efe..0ecdb47a23ab 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -106,7 +106,7 @@ MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the PIIX4 at the given address. VERY DANGEROUS! */ static int force_addr; -module_param (force_addr, int, 0); +module_param_hw(force_addr, int, ioport, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the PIIX4 at the given address. 
" "EXTREMELY DANGEROUS!"); diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c index 7d58a40faf2d..d543a9867ba4 100644 --- a/drivers/i2c/busses/i2c-sis5595.c +++ b/drivers/i2c/busses/i2c-sis5595.c @@ -119,7 +119,7 @@ static int blacklist[] = { /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. */ static u16 force_addr; -module_param(force_addr, ushort, 0); +module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); static struct pci_driver sis5595_driver; diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 0ee2646f3b00..0dc45e12bb1d 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -94,7 +94,7 @@ MODULE_PARM_DESC(force, "Forcibly enable the SMBus. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the VT596 at the given address. VERY DANGEROUS! */ static u16 force_addr; -module_param(force_addr, ushort, 0); +module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the SMBus at the given address. " "EXTREMELY DANGEROUS!"); diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 0a7e410b6195..e0923bee8d1f 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -42,7 +42,7 @@ MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 static int base[MAX_DEVICES] = { 0x820, 0x840 }; -module_param_array(base, int, NULL, 0); +module_param_hw_array(base, int, ioport, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); #define POLL_TIMEOUT (HZ/5) diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 45b3f41a43d4..323af721f8cb 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -107,7 +107,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) if (cmd->tf_flags & IDE_TFLAG_DYN) kfree(orig_cmd); - else + else if (cmd != orig_cmd) memcpy(orig_cmd, cmd, sizeof(*cmd)); } } diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index a74ae8df4bb8..023562565d11 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1183,9 +1183,7 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) spin_lock_init(&hwif->lock); - init_timer(&hwif->timer); - hwif->timer.function = &ide_timer_expiry; - hwif->timer.data = (unsigned long)hwif; + setup_timer(&hwif->timer, &ide_timer_expiry, (unsigned long)hwif); init_completion(&hwif->gendev_rel_comp); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5805b041dd0f..216d7ec88c0c 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1097,6 +1097,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl), ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl), ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), + ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt), ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv), {} }; @@ -1309,6 +1310,7 @@ static void intel_idle_state_table_update(void) ivt_idle_state_table_update(); break; case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_ATOM_GEMINI_LAKE: bxt_idle_state_table_update(); break; case INTEL_FAM6_SKYLAKE_DESKTOP: diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c index 2df84fa5e3fc..2da741d27540 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/adc/stx104.c @@ -49,7 +49,7 @@ 
static unsigned int base[max_num_isa_dev(STX104_EXTENT)]; static unsigned int num_stx104; -module_param_array(base, uint, &num_stx104, 0); +module_param_hw_array(base, uint, ioport, &num_stx104, 0); MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); /** diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c index a0464227a3a0..a8dffd938615 100644 --- a/drivers/iio/dac/cio-dac.c +++ b/drivers/iio/dac/cio-dac.c @@ -39,7 +39,7 @@ static unsigned int base[max_num_isa_dev(CIO_DAC_EXTENT)]; static unsigned int num_cio_dac; -module_param_array(base, uint, &num_cio_dac, 0); +module_param_hw_array(base, uint, ioport, &num_cio_dac, 0); MODULE_PARM_DESC(base, "Measurement Computing CIO-DAC base addresses"); /** diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index ef11e770f822..6a72095d6c7a 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -35,6 +35,7 @@ #include <rdma/ib_user_verbs.h> #include <linux/netdevice.h> #include <linux/iommu.h> +#include <linux/pci.h> #include <net/addrconf.h> #include <linux/qed/qede_roce.h> #include <linux/qed/qed_chain.h> diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c index 3827a22362de..9ce71dfa0de1 100644 --- a/drivers/input/mouse/inport.c +++ b/drivers/input/mouse/inport.c @@ -78,7 +78,7 @@ MODULE_LICENSE("GPL"); #define INPORT_IRQ 5 static int inport_irq = INPORT_IRQ; -module_param_named(irq, inport_irq, uint, 0); +module_param_hw_named(irq, inport_irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ number (5=default)"); static struct input_dev *inport_dev; diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c index e2413113df22..6f165e053f4d 100644 --- a/drivers/input/mouse/logibm.c +++ b/drivers/input/mouse/logibm.c @@ -69,7 +69,7 @@ MODULE_LICENSE("GPL"); #define LOGIBM_IRQ 5 static int logibm_irq = LOGIBM_IRQ; -module_param_named(irq, logibm_irq, uint, 0); +module_param_hw_named(irq, logibm_irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ number (5=default)"); static struct input_dev *logibm_dev; diff --git a/drivers/input/touchscreen/mk712.c b/drivers/input/touchscreen/mk712.c index 36e57deacd03..bd5352824f77 100644 --- a/drivers/input/touchscreen/mk712.c +++ b/drivers/input/touchscreen/mk712.c @@ -50,11 +50,11 @@ MODULE_DESCRIPTION("ICS MicroClock MK712 TouchScreen driver"); MODULE_LICENSE("GPL"); static unsigned int mk712_io = 0x260; /* Also 0x200, 0x208, 0x300 */ -module_param_named(io, mk712_io, uint, 0); +module_param_hw_named(io, mk712_io, uint, ioport, 0); MODULE_PARM_DESC(io, "I/O base address of MK712 touchscreen controller"); static unsigned int mk712_irq = 10; /* Also 12, 14, 15 */ -module_param_named(irq, mk712_irq, uint, 0); +module_param_hw_named(irq, mk712_irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ of MK712 touchscreen controller"); /* eight 8-bit registers */ diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 063343909b0d..6629c472eafd 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -696,9 +696,9 @@ out_clear_state: out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 591bb96047c9..380969aa60d5 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg { }; struct arm_smmu_strtab_ent { - bool valid; - - bool bypass; /* 
Overrides s1/s2 config */ + /* + * An STE is "assigned" if the master emitting the corresponding SID + * is attached to a domain. The behaviour of an unassigned STE is + * determined by the disable_bypass parameter, whereas an assigned + * STE behaves according to s1_cfg/s2_cfg, which themselves are + * configured according to the domain type. + */ + bool assigned; struct arm_smmu_s1_cfg *s1_cfg; struct arm_smmu_s2_cfg *s2_cfg; }; @@ -632,6 +637,7 @@ enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_S1 = 0, ARM_SMMU_DOMAIN_S2, ARM_SMMU_DOMAIN_NESTED, + ARM_SMMU_DOMAIN_BYPASS, }; struct arm_smmu_domain { @@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, * This is hideously complicated, but we only really care about * three cases at the moment: * - * 1. Invalid (all zero) -> bypass (init) - * 2. Bypass -> translation (attach) - * 3. Translation -> bypass (detach) + * 1. Invalid (all zero) -> bypass/fault (init) + * 2. Bypass/fault -> translation/bypass (attach) + * 3. Translation/bypass -> bypass/fault (detach) * * Given that we can't update the STE atomically and the SMMU * doesn't read the thing in a defined order, that leaves us @@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, } /* Nuke the existing STE_0 value, as we're going to rewrite it */ - val = ste->valid ? STRTAB_STE_0_V : 0; + val = STRTAB_STE_0_V; + + /* Bypass/fault */ + if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) { + if (!ste->assigned && disable_bypass) + val |= STRTAB_STE_0_CFG_ABORT; + else + val |= STRTAB_STE_0_CFG_BYPASS; - if (ste->bypass) { - val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT - : STRTAB_STE_0_CFG_BYPASS; dst[0] = cpu_to_le64(val); dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING << STRTAB_STE_1_SHCFG_SHIFT); @@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) { unsigned int i; - struct arm_smmu_strtab_ent ste = { - .valid = true, - .bypass = true, - }; + struct arm_smmu_strtab_ent ste = { .assigned = false }; for (i = 0; i < nent; ++i) { arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste); @@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) { struct arm_smmu_domain *smmu_domain; - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) + if (type != IOMMU_DOMAIN_UNMANAGED && + type != IOMMU_DOMAIN_DMA && + type != IOMMU_DOMAIN_IDENTITY) return NULL; /* @@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; + if (domain->type == IOMMU_DOMAIN_IDENTITY) { + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; + return 0; + } + /* Restrict the stage to what we can actually support */ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) smmu_domain->stage = ARM_SMMU_DOMAIN_S2; @@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) return step; } -static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) +static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { int i; struct arm_smmu_master_data *master = fwspec->iommu_priv; @@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } - - return 0; } static void arm_smmu_detach_dev(struct device *dev) { struct 
arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; - master->ste.bypass = true; - if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0) - dev_warn(dev, "failed to install bypass STE\n"); + master->ste.assigned = false; + arm_smmu_install_ste_for_dev(dev->iommu_fwspec); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ste = &master->ste; /* Already attached to a different domain? */ - if (!ste->bypass) + if (ste->assigned) arm_smmu_detach_dev(dev); mutex_lock(&smmu_domain->init_mutex); @@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) goto out_unlock; } - ste->bypass = false; - ste->valid = true; + ste->assigned = true; - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) { + ste->s1_cfg = NULL; + ste->s2_cfg = NULL; + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { ste->s1_cfg = &smmu_domain->s1_cfg; ste->s2_cfg = NULL; arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); @@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ste->s2_cfg = &smmu_domain->s2_cfg; } - ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec); - if (ret < 0) - ste->valid = false; - + arm_smmu_install_ste_for_dev(dev->iommu_fwspec); out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; @@ -1704,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + if (domain->type == IOMMU_DOMAIN_IDENTITY) + return iova; + if (!ops) return 0; @@ -1807,7 +1820,7 @@ static void arm_smmu_remove_device(struct device *dev) master = fwspec->iommu_priv; smmu = master->smmu; - if (master && master->ste.valid) + if (master && master->ste.assigned) arm_smmu_detach_dev(dev); iommu_group_remove_device(dev); iommu_device_unlink(&smmu->iommu, dev); @@ -1837,6 +1850,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + switch (attr) { case DOMAIN_ATTR_NESTING: *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); @@ -1852,6 +1868,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + mutex_lock(&smmu_domain->init_mutex); switch (attr) { @@ -1893,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, @@ -2761,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, }; +module_platform_driver(arm_smmu_driver); -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if 
(ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); - -#ifdef CONFIG_ACPI -static int __init acpi_smmu_v3_init(struct acpi_table_header *table) -{ - if (iort_node_match(ACPI_IORT_NODE_SMMU_V3)) - return arm_smmu_init(); - - return 0; -} -IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init); -#endif +IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b493c99e17f7..7ec30b08b3bd 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -162,6 +162,7 @@ #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 #define sTLBGSTATUS_GSACTIVE (1 << 0) #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ +#define TLB_SPIN_COUNT 10 /* Stream mapping registers */ #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) @@ -216,8 +217,7 @@ enum arm_smmu_s2cr_privcfg { #define CBA2R_VMID_MASK 0xffff /* Translation context bank */ -#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) -#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift)) +#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) #define ARM_SMMU_CB_SCTLR 0x0 #define ARM_SMMU_CB_ACTLR 0x4 @@ -238,6 +238,8 @@ enum arm_smmu_s2cr_privcfg { #define ARM_SMMU_CB_S1_TLBIVAL 0x620 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 +#define ARM_SMMU_CB_TLBSYNC 0x7f0 +#define ARM_SMMU_CB_TLBSTATUS 0x7f4 #define ARM_SMMU_CB_ATS1PR 0x800 #define ARM_SMMU_CB_ATSR 0x8f0 @@ -344,7 +346,7 @@ struct arm_smmu_device { struct device *dev; void __iomem *base; - unsigned long size; + void __iomem *cb_base; unsigned long pgshift; #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) @@ -404,18 +406,20 @@ enum arm_smmu_context_fmt { struct arm_smmu_cfg { u8 cbndx; u8 irptndx; + union { + u16 asid; + u16 vmid; + }; u32 cbar; enum arm_smmu_context_fmt fmt; }; #define INVALID_IRPTNDX 0xff -#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx) -#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1) - enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_S1 = 0, ARM_SMMU_DOMAIN_S2, ARM_SMMU_DOMAIN_NESTED, + ARM_SMMU_DOMAIN_BYPASS, }; struct arm_smmu_domain { @@ -569,49 +573,67 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) } /* Wait for any pending TLB invalidations to complete */ -static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) +static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, + void __iomem *sync, void __iomem *status) { - int count = 0; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); - while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) - & sTLBGSTATUS_GSACTIVE) { - cpu_relax(); - if (++count == TLB_LOOP_TIMEOUT) { - dev_err_ratelimited(smmu->dev, - "TLB sync timed out -- SMMU may be deadlocked\n"); - return; + unsigned int spin_cnt, delay; + + writel_relaxed(0, sync); + for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { + for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { + if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) + return; + cpu_relax(); } - udelay(1); + udelay(delay); } + dev_err_ratelimited(smmu->dev, + "TLB sync timed out -- SMMU may be deadlocked\n"); } -static void arm_smmu_tlb_sync(void *cookie) 
+static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) +{ + void __iomem *base = ARM_SMMU_GR0(smmu); + + __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, + base + ARM_SMMU_GR0_sTLBGSTATUS); +} + +static void arm_smmu_tlb_sync_context(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; - __arm_smmu_tlb_sync(smmu_domain->smmu); + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); + + __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, + base + ARM_SMMU_CB_TLBSTATUS); } -static void arm_smmu_tlb_inv_context(void *cookie) +static void arm_smmu_tlb_sync_vmid(void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + + arm_smmu_tlb_sync_global(smmu_domain->smmu); +} + +static void arm_smmu_tlb_inv_context_s1(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - void __iomem *base; + void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); - if (stage1) { - base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg), - base + ARM_SMMU_CB_S1_TLBIASID); - } else { - base = ARM_SMMU_GR0(smmu); - writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), - base + ARM_SMMU_GR0_TLBIVMID); - } + writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_inv_context_s2(void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *base = ARM_SMMU_GR0(smmu); - __arm_smmu_tlb_sync(smmu); + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + arm_smmu_tlb_sync_global(smmu); } static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, @@ -619,31 +641,28 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - void __iomem *reg; + void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); if (stage1) { - reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { iova &= ~12UL; - iova |= ARM_SMMU_CB_ASID(smmu, cfg); + iova |= cfg->asid; do { writel_relaxed(iova, reg); iova += granule; } while (size -= granule); } else { iova >>= 12; - iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48; + iova |= (u64)cfg->asid << 48; do { writeq_relaxed(iova, reg); iova += granule >> 12; } while (size -= granule); } - } else if (smmu->version == ARM_SMMU_V2) { - reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + } else { reg += leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; @@ -651,16 +670,40 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, smmu_write_atomic_lq(iova, reg); iova += granule >> 12; } while (size -= granule); - } else { - reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; - writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg); } } -static const struct iommu_gather_ops arm_smmu_gather_ops = { - .tlb_flush_all = arm_smmu_tlb_inv_context, +/* + * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears + * almost negligible, but the benefit of getting the first one in as far ahead + * of the sync as possible is significant, hence we don't just make this a + * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think. + */ +static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); +} + +static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync, + .tlb_sync = arm_smmu_tlb_sync_context, +}; + +static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync_context, +}; + +static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, + .tlb_sync = arm_smmu_tlb_sync_vmid, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -673,7 +716,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *cb_base; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); if (!(fsr & FSR_FAULT)) @@ -726,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, gr1_base = ARM_SMMU_GR1(smmu); stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); if (smmu->version > ARM_SMMU_V1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) @@ -735,7 +778,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, reg = CBA2R_RW64_32BIT; /* 16-bit VMIDs live in CBA2R */ if (smmu->features & ARM_SMMU_FEAT_VMID16) - reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT; + reg |= cfg->vmid << CBA2R_VMID_SHIFT; writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); } @@ -754,34 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { /* 8-bit VMIDs live in CBAR */ - reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT; + reg |= cfg->vmid << CBAR_VMID_SHIFT; } writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); - /* TTBRs */ - if (stage1) { - u16 asid = ARM_SMMU_CB_ASID(smmu, cfg); - - if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); - reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; - writel_relaxed(reg, cb_base + 
ARM_SMMU_CB_TTBR1); - writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); - } else { - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - reg64 |= (u64)asid << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; - reg64 |= (u64)asid << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); - } - } else { - reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); - } - - /* TTBCR */ + /* + * TTBCR + * We must write this before the TTBRs, since it determines the + * access behaviour of some fields (in particular, ASID[15:8]). + */ if (stage1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { reg = pgtbl_cfg->arm_v7s_cfg.tcr; @@ -800,6 +824,27 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, } writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + /* TTBRs */ + if (stage1) { + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); + reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); + writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); + } else { + reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; + reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); + reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; + reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); + } + } else { + reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; + writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); + } + /* MAIRs (stage-1 only) */ if (stage1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { @@ -833,11 +878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, enum io_pgtable_fmt fmt; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + const struct iommu_gather_ops *tlb_ops; mutex_lock(&smmu_domain->init_mutex); if (smmu_domain->smmu) goto out_unlock; + if (domain->type == IOMMU_DOMAIN_IDENTITY) { + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; + smmu_domain->smmu = smmu; + goto out_unlock; + } + /* * Mapping the requested stage onto what we support is surprisingly * complicated, mainly because the spec allows S1+S2 SMMUs without @@ -904,6 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ias = min(ias, 32UL); oas = min(oas, 32UL); } + tlb_ops = &arm_smmu_s1_tlb_ops; break; case ARM_SMMU_DOMAIN_NESTED: /* @@ -922,12 +975,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ias = min(ias, 40UL); oas = min(oas, 40UL); } + if (smmu->version == ARM_SMMU_V2) + tlb_ops = &arm_smmu_s2_tlb_ops_v2; + else + tlb_ops = &arm_smmu_s2_tlb_ops_v1; break; default: ret = -EINVAL; goto out_unlock; } - ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); if (ret < 0) @@ -941,11 +997,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->irptndx = cfg->cbndx; } + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) + cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base; + else + cfg->asid = cfg->cbndx + smmu->cavium_id_base; + pgtbl_cfg = (struct io_pgtable_cfg) { .pgsize_bitmap = smmu->pgsize_bitmap, .ias = ias, .oas = oas, - .tlb = &arm_smmu_gather_ops, + .tlb = tlb_ops, .iommu_dev = smmu->dev, }; @@ -998,14 +1059,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) void __iomem *cb_base; int irq; 
- if (!smmu) + if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) return; /* * Disable the context bank and free the page tables before freeing * it. */ - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); if (cfg->irptndx != INVALID_IRPTNDX) { @@ -1021,7 +1082,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) { struct arm_smmu_domain *smmu_domain; - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) + if (type != IOMMU_DOMAIN_UNMANAGED && + type != IOMMU_DOMAIN_DMA && + type != IOMMU_DOMAIN_IDENTITY) return NULL; /* * Allocate the domain and initialise some of its data structures. @@ -1250,10 +1313,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, { struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2cr *s2cr = smmu->s2crs; - enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; u8 cbndx = smmu_domain->cfg.cbndx; + enum arm_smmu_s2cr_type type; int i, idx; + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) + type = S2CR_TYPE_BYPASS; + else + type = S2CR_TYPE_TRANS; + for_each_cfg_sme(fwspec, i, idx) { if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) continue; @@ -1356,7 +1424,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, u64 phys; unsigned long va; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); /* ATS1 registers can only be written atomically */ va = iova & ~0xfffUL; @@ -1391,6 +1459,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + if (domain->type == IOMMU_DOMAIN_IDENTITY) + return iova; + if (!ops) return 0; @@ -1467,7 +1538,7 @@ static int arm_smmu_add_device(struct device *dev) } if (mask & ~smmu->smr_mask_mask) { dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", - sid, smmu->smr_mask_mask); + mask, smmu->smr_mask_mask); goto out_free; } } @@ -1549,6 +1620,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + switch (attr) { case DOMAIN_ATTR_NESTING: *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); @@ -1564,6 +1638,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + mutex_lock(&smmu_domain->init_mutex); switch (attr) { @@ -1590,13 +1667,15 @@ out_unlock: static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) { - u32 fwid = 0; + u32 mask, fwid = 0; if (args->args_count > 0) fwid |= (u16)args->args[0]; if (args->args_count > 1) fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) + fwid |= (u16)mask << SMR_MASK_SHIFT; return iommu_fwspec_add_ids(dev, &fwid, 1); } @@ -1613,6 +1692,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(&region->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, @@ -1683,7 +1764,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i <
smmu->num_context_banks; ++i) { - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); + cb_base = ARM_SMMU_CB(smmu, i); writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); /* @@ -1729,7 +1810,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) reg |= sCR0_EXIDENABLE; /* Push the button */ - __arm_smmu_tlb_sync(smmu); + arm_smmu_tlb_sync_global(smmu); writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); } @@ -1863,11 +1944,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) /* Check for size mismatch of SMMU address space from mapped region */ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); - size *= 2 << smmu->pgshift; - if (smmu->size != size) + size <<= smmu->pgshift; + if (smmu->cb_base != gr0_base + size) dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", - size, smmu->size); + "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", + size * 2, (smmu->cb_base - gr0_base) * 2); smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; @@ -1887,6 +1968,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) atomic_add_return(smmu->num_context_banks, &cavium_smmu_context_count); smmu->cavium_id_base -= smmu->num_context_banks; + dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); } /* ID2 */ @@ -2075,6 +2157,23 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, return 0; } +static void arm_smmu_bus_init(void) +{ + /* Oh, for a proper bus abstraction */ + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &arm_smmu_ops); +#ifdef CONFIG_ARM_AMBA + if (!iommu_present(&amba_bustype)) + bus_set_iommu(&amba_bustype, &arm_smmu_ops); +#endif +#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) { + pci_request_acs(); + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + } +#endif +} + static int arm_smmu_device_probe(struct platform_device *pdev) { struct resource *res; @@ -2103,7 +2202,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); - smmu->size = resource_size(res); + smmu->cb_base = smmu->base + resource_size(res) / 2; num_irqs = 0; while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { @@ -2180,21 +2279,30 @@ static int arm_smmu_device_probe(struct platform_device *pdev) arm_smmu_device_reset(smmu); arm_smmu_test_smr_masks(smmu); - /* Oh, for a proper bus abstraction */ - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &arm_smmu_ops); -#ifdef CONFIG_ARM_AMBA - if (!iommu_present(&amba_bustype)) - bus_set_iommu(&amba_bustype, &arm_smmu_ops); -#endif -#ifdef CONFIG_PCI - if (!iommu_present(&pci_bus_type)) { - pci_request_acs(); - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - } -#endif + /* + * For ACPI and generic DT bindings, an SMMU will be probed before + * any device which might need it, so we want the bus ops in place + * ready to handle default domain setup as soon as any SMMU exists. 
+ */ + if (!using_legacy_binding) + arm_smmu_bus_init(); + + return 0; +} + +/* + * With the legacy DT binding in play, though, we have no guarantees about + * probe order, but then we're also not doing default domains, so we can + * delay setting bus ops until we're sure every possible SMMU is ready, + * and that way ensure that no add_device() calls get missed. + */ +static int arm_smmu_legacy_bus_init(void) +{ + if (using_legacy_binding) + arm_smmu_bus_init(); return 0; } +device_initcall_sync(arm_smmu_legacy_bus_init); static int arm_smmu_device_remove(struct platform_device *pdev) { @@ -2219,56 +2327,14 @@ static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, }; - -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if (ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); -IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); - -#ifdef CONFIG_ACPI -static int __init arm_smmu_acpi_init(struct acpi_table_header *table) -{ - if (iort_node_match(ACPI_IORT_NODE_SMMU)) - return arm_smmu_init(); - - return 0; -} -IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init); -#endif +module_platform_driver(arm_smmu_driver); + +IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL); +IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL); +IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL); +IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL); +IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL); +IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 48d36ce59efb..8348f366ddd1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) return PAGE_SIZE; } -static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) -{ - struct iommu_dma_cookie *cookie = domain->iova_cookie; - - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) - return &cookie->iovad; - return NULL; -} - static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) { struct iommu_dma_cookie *cookie; @@ -167,22 +158,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) } EXPORT_SYMBOL(iommu_put_dma_cookie); -static void iova_reserve_pci_windows(struct pci_dev *dev, - struct iova_domain *iovad) +/** + * iommu_dma_get_resv_regions - Reserved region driver helper + * @dev: Device from iommu_get_resv_regions() + * @list: Reserved region list from iommu_get_resv_regions() + * + * IOMMU drivers can use this to implement their .get_resv_regions 
callback + * for general non-IOMMU-specific reservations. Currently, this covers host + * bridge windows for PCI devices. + */ +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct pci_host_bridge *bridge; struct resource_entry *window; - unsigned long lo, hi; + if (!dev_is_pci(dev)) + return; + + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); resource_list_for_each_entry(window, &bridge->windows) { - if (resource_type(window->res) != IORESOURCE_MEM && - resource_type(window->res) != IORESOURCE_IO) + struct iommu_resv_region *region; + phys_addr_t start; + size_t length; + + if (resource_type(window->res) != IORESOURCE_MEM) + continue; + + start = window->res->start - window->offset; + length = window->res->end - window->res->start + 1; + region = iommu_alloc_resv_region(start, length, 0, + IOMMU_RESV_RESERVED); + if (!region) + return; + + list_add_tail(&region->list, list); + } +} +EXPORT_SYMBOL(iommu_dma_get_resv_regions); + +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, + phys_addr_t start, phys_addr_t end) +{ + struct iova_domain *iovad = &cookie->iovad; + struct iommu_dma_msi_page *msi_page; + int i, num_pages; + + start -= iova_offset(iovad, start); + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); + + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); + if (!msi_page) + return -ENOMEM; + + for (i = 0; i < num_pages; i++) { + msi_page[i].phys = start; + msi_page[i].iova = start; + INIT_LIST_HEAD(&msi_page[i].list); + list_add(&msi_page[i].list, &cookie->msi_page_list); + start += iovad->granule; + } + + return 0; +} + +static int iova_reserve_iommu_regions(struct device *dev, + struct iommu_domain *domain) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + struct iommu_resv_region *region; + LIST_HEAD(resv_regions); + int ret = 0; + + iommu_get_resv_regions(dev, &resv_regions); + list_for_each_entry(region, &resv_regions, list) { + unsigned long lo, hi; + + /* We ARE the software that manages these! */ + if (region->type == IOMMU_RESV_SW_MSI) continue; - lo = iova_pfn(iovad, window->res->start - window->offset); - hi = iova_pfn(iovad, window->res->end - window->offset); + lo = iova_pfn(iovad, region->start); + hi = iova_pfn(iovad, region->start + region->length - 1); reserve_iova(iovad, lo, hi); + + if (region->type == IOMMU_RESV_MSI) + ret = cookie_init_hw_msi_region(cookie, region->start, + region->start + region->length); + if (ret) + break; } + iommu_put_resv_regions(dev, &resv_regions); + + return ret; } /** @@ -203,7 +271,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn, end_pfn; - bool pci = dev && dev_is_pci(dev); if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * leave the cache limit at the top of their range to save an rb_last() * traversal on every allocation. */ - if (pci) + if (dev && dev_is_pci(dev)) end_pfn &= DMA_BIT_MASK(32) >> order; /* start_pfn is always nonzero for an already-initialised domain */ @@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * area cache limit down for the benefit of the smaller one.
*/ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); - } else { - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); - if (pci) - iova_reserve_pci_windows(to_pci_dev(dev), iovad); + + return 0; } - return 0; + + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); + if (!dev) + return 0; + + return iova_reserve_iommu_regions(dev, domain); } EXPORT_SYMBOL(iommu_dma_init_domain); @@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } } -static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, - dma_addr_t dma_limit, struct device *dev) +static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, + size_t size, dma_addr_t dma_limit, struct device *dev) { - struct iova_domain *iovad = cookie_iovad(domain); - unsigned long shift = iova_shift(iovad); - unsigned long length = iova_align(iovad, size) >> shift; - struct iova *iova = NULL; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + unsigned long shift, iova_len, iova = 0; + + if (cookie->type == IOMMU_DMA_MSI_COOKIE) { + cookie->msi_iova += size; + return cookie->msi_iova - size; + } + + shift = iova_shift(iovad); + iova_len = size >> shift; + /* + * Freeing non-power-of-two-sized allocations back into the IOVA caches + * will come back to bite us badly, so we have to waste a bit of space + * rounding up anything cacheable to make sure that can't happen. The + * order of the unadjusted size will still match upon freeing. + */ + if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) + iova_len = roundup_pow_of_two(iova_len); if (domain->geometry.force_aperture) dma_limit = min(dma_limit, domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) - iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift, - true); - /* - * Enforce size-alignment to be safe - there could perhaps be an - * attribute to control this per-device, or at least per-domain... 
- */ + iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift); + if (!iova) - iova = alloc_iova(iovad, length, dma_limit >> shift, true); + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift); - return iova; + return (dma_addr_t)iova << shift; } -/* The IOVA allocator knows what we mapped, so just unmap whatever that was */ -static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) +static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, + dma_addr_t iova, size_t size) { - struct iova_domain *iovad = cookie_iovad(domain); + struct iova_domain *iovad = &cookie->iovad; unsigned long shift = iova_shift(iovad); - unsigned long pfn = dma_addr >> shift; - struct iova *iova = find_iova(iovad, pfn); - size_t size; - if (WARN_ON(!iova)) - return; + /* The MSI case is only ever cleaning up its most recent allocation */ + if (cookie->type == IOMMU_DMA_MSI_COOKIE) + cookie->msi_iova -= size; + else + free_iova_fast(iovad, iova >> shift, size >> shift); +} + +static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, + size_t size) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t iova_off = iova_offset(iovad, dma_addr); + + dma_addr -= iova_off; + size = iova_align(iovad, size + iova_off); - size = iova_size(iova) << shift; - size -= iommu_unmap(domain, pfn << shift, size); - /* ...and if we can't, then something is horribly, horribly wrong */ - WARN_ON(size > 0); - __free_iova(iovad, iova); + WARN_ON(iommu_unmap(domain, dma_addr, size) != size); + iommu_dma_free_iova(cookie, dma_addr, size); } static void __iommu_dma_free_pages(struct page **pages, int count) @@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); *handle = DMA_ERROR_CODE; } @@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, void (*flush_page)(struct device *, const void *, phys_addr_t)) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; struct page **pages; struct sg_table sgt; - dma_addr_t dma_addr; + dma_addr_t iova; unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; *handle = DMA_ERROR_CODE; @@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, if (!pages) return NULL; - iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev); + size = iova_align(iovad, size); + iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); if (!iova) goto out_free_pages; - size = iova_align(iovad, size); if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) goto out_free_iova; @@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, sg_miter_stop(&miter); } - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot) + if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot) < size) goto out_free_sg; - *handle = dma_addr; + *handle = iova; sg_free_table(&sgt); return pages; out_free_sg: 
sg_free_table(&sgt); out_free_iova: - __free_iova(iovad, iova); + iommu_dma_free_iova(cookie, iova, size); out_free_pages: __iommu_dma_free_pages(pages, count); return NULL; @@ -527,22 +615,22 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t size, int prot) { - dma_addr_t dma_addr; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; size_t iova_off = iova_offset(iovad, phys); - size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev); + dma_addr_t iova; + size = iova_align(iovad, size + iova_off); + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); if (!iova) return DMA_ERROR_CODE; - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) { - __free_iova(iovad, iova); + if (iommu_map(domain, iova, phys - iova_off, size, prot)) { + iommu_dma_free_iova(cookie, iova, size); return DMA_ERROR_CODE; } - return dma_addr + iova_off; + return iova + iova_off; } dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, @@ -554,7 +642,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); } /* @@ -643,10 +731,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int prot) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; struct scatterlist *s, *prev = NULL; - dma_addr_t dma_addr; + dma_addr_t iova; size_t iova_len = 0; unsigned long mask = dma_get_seg_boundary(dev); int i; @@ -690,7 +778,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev); + iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); if (!iova) goto out_restore_sg; @@ -698,14 +786,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * We'll leave any physical concatenation to the IOMMU driver's * implementation - it knows better than we do. */ - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len) + if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; - return __finalise_sg(dev, sg, nents, dma_addr); + return __finalise_sg(dev, sg, nents, iova); out_free_iova: - __free_iova(iovad, iova); + iommu_dma_free_iova(cookie, iova, iova_len); out_restore_sg: __invalidate_sg(sg, nents); return 0; @@ -714,11 +801,21 @@ out_restore_sg: void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { + dma_addr_t start, end; + struct scatterlist *tmp; + int i; /* * The scatterlist segments are mapped into a single * contiguous IOVA allocation, so this is incredibly easy. 
*/ - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg)); + start = sg_dma_address(sg); + for_each_sg(sg_next(sg), tmp, nents - 1, i) { + if (sg_dma_len(tmp) == 0) + break; + sg = tmp; + } + end = sg_dma_address(sg) + sg_dma_len(sg); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start); } dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, @@ -731,7 +828,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); } int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -744,8 +841,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iommu_dma_msi_page *msi_page; - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + dma_addr_t iova; int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; size_t size = cookie_msi_granule(cookie); @@ -758,29 +854,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (!msi_page) return NULL; - msi_page->phys = msi_addr; - if (iovad) { - iova = __alloc_iova(domain, size, dma_get_mask(dev), dev); - if (!iova) - goto out_free_page; - msi_page->iova = iova_dma_addr(iovad, iova); - } else { - msi_page->iova = cookie->msi_iova; - cookie->msi_iova += size; - } - - if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) - goto out_free_iova; + iova = __iommu_dma_map(dev, msi_addr, size, prot); + if (iommu_dma_mapping_error(dev, iova)) + goto out_free_page; INIT_LIST_HEAD(&msi_page->list); + msi_page->phys = msi_addr; + msi_page->iova = iova; list_add(&msi_page->list, &cookie->msi_page_list); return msi_page; -out_free_iova: - if (iovad) - __free_iova(iovad, iova); - else - cookie->msi_iova -= size; out_free_page: kfree(msi_page); return NULL; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 36e3f430d265..cbf7763d8091 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) ((void *)drhd) + drhd->header.length, dmaru->segment, dmaru->devices, dmaru->devices_cnt); - if (ret != 0) + if (ret) break; } if (ret >= 0) @@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; - int ret = 0; + int ret; drhd = (struct acpi_dmar_hardware_unit *)header; dmaru = dmar_find_dmaru(drhd); @@ -551,17 +551,16 @@ static int __init dmar_table_detect(void) status = AE_NOT_FOUND; } - return (ACPI_SUCCESS(status) ? 1 : 0); + return ACPI_SUCCESS(status) ? 
0 : -ENOENT; } static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, size_t len, struct dmar_res_callback *cb) { - int ret = 0; struct acpi_dmar_header *iter, *next; struct acpi_dmar_header *end = ((void *)start) + len; - for (iter = start; iter < end && ret == 0; iter = next) { + for (iter = start; iter < end; iter = next) { next = (void *)iter + iter->length; if (iter->length == 0) { /* Avoid looping forever on bad ACPI tables */ @@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, } else if (next > end) { /* Avoid passing table end */ pr_warn(FW_BUG "Record passes table end\n"); - ret = -EINVAL; - break; + return -EINVAL; } if (cb->print_entry) @@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, pr_debug("Unknown DMAR structure type %d\n", iter->type); } else if (cb->cb[iter->type]) { + int ret; + ret = cb->cb[iter->type](iter, cb->arg[iter->type]); + if (ret) + return ret; } else if (!cb->ignore_unhandled) { pr_warn("No handler for DMAR structure type %d\n", iter->type); - ret = -EINVAL; + return -EINVAL; } } - return ret; + return 0; } static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, @@ -607,8 +609,8 @@ static int __init parse_dmar_table(void) { struct acpi_table_dmar *dmar; - int ret = 0; int drhd_count = 0; + int ret; struct dmar_res_callback cb = { .print_entry = true, .ignore_unhandled = true, @@ -891,17 +893,17 @@ int __init detect_intel_iommu(void) down_write(&dmar_global_lock); ret = dmar_table_detect(); - if (ret) - ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, - &validate_drhd_cb); - if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret) + ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, + &validate_drhd_cb); + if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); } #ifdef CONFIG_X86 - if (ret) + if (!ret) x86_init.iommu.iommu_init = intel_iommu_init; #endif @@ -911,10 +913,9 @@ int __init detect_intel_iommu(void) } up_write(&dmar_global_lock); - return ret ? 1 : -ENODEV; + return ret ? 
ret : 1; } - static void unmap_iommu(struct intel_iommu *iommu) { iounmap(iommu->reg); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index c01bfcdb2383..2395478dde75 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova) #define REG_V5_PT_BASE_PFN 0x00C #define REG_V5_MMU_FLUSH_ALL 0x010 #define REG_V5_MMU_FLUSH_ENTRY 0x014 +#define REG_V5_MMU_FLUSH_RANGE 0x018 +#define REG_V5_MMU_FLUSH_START 0x020 +#define REG_V5_MMU_FLUSH_END 0x024 #define REG_V5_INT_STATUS 0x060 #define REG_V5_INT_CLEAR 0x064 #define REG_V5_FAULT_AR_VA 0x070 @@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, { unsigned int i; - for (i = 0; i < num_inv; i++) { - if (MMU_MAJ_VER(data->version) < 5) + if (MMU_MAJ_VER(data->version) < 5) { + for (i = 0; i < num_inv; i++) { writel((iova & SPAGE_MASK) | 1, data->sfrbase + REG_MMU_FLUSH_ENTRY); - else + iova += SPAGE_SIZE; + } + } else { + if (num_inv == 1) { writel((iova & SPAGE_MASK) | 1, data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); - iova += SPAGE_SIZE; + } else { + writel((iova & SPAGE_MASK), + data->sfrbase + REG_V5_MMU_FLUSH_START); + writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, + data->sfrbase + REG_V5_MMU_FLUSH_END); + writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE); + } } } @@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) goto err_counter; /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ - for (i = 0; i < NUM_LV1ENTRIES; i += 8) { - domain->pgtable[i + 0] = ZERO_LV2LINK; - domain->pgtable[i + 1] = ZERO_LV2LINK; - domain->pgtable[i + 2] = ZERO_LV2LINK; - domain->pgtable[i + 3] = ZERO_LV2LINK; - domain->pgtable[i + 4] = ZERO_LV2LINK; - domain->pgtable[i + 5] = ZERO_LV2LINK; - domain->pgtable[i + 6] = ZERO_LV2LINK; - domain->pgtable[i + 7] = ZERO_LV2LINK; - } + for (i = 0; i < NUM_LV1ENTRIES; i++) + domain->pgtable[i] = ZERO_LV2LINK; handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, DMA_TO_DEVICE); diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index aab723f91f12..c3434f29c967 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -20,6 +20,7 @@ #define __FSL_PAMU_H #include <linux/iommu.h> +#include <linux/pci.h> #include <asm/fsl_pamu_stash.h> diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d412a313a372..90ab0115d78e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -183,6 +183,7 @@ static int rwbf_quirk; * (used when kernel is launched w/ TXT) */ static int force_on = 0; +int intel_iommu_tboot_noforce; /* * 0: Present @@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str) "Intel-IOMMU: enable pre-production PASID support\n"); intel_iommu_pasid28 = 1; iommu_identity_mapping |= IDENTMAP_GFX; + } else if (!strncmp(str, "tboot_noforce", 13)) { + printk(KERN_INFO + "Intel-IOMMU: not forcing on after tboot. 
This could expose security risk for tboot\n"); + intel_iommu_tboot_noforce = 1; } str += strcspn(str, ","); @@ -4730,6 +4735,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu) return 0; } +static void intel_disable_iommus(void) +{ + struct intel_iommu *iommu = NULL; + struct dmar_drhd_unit *drhd; + + for_each_iommu(iommu, drhd) + iommu_disable_translation(iommu); +} + static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { return container_of(dev, struct intel_iommu, iommu.dev); @@ -4840,8 +4854,28 @@ int __init intel_iommu_init(void) goto out_free_dmar; } - if (no_iommu || dmar_disabled) + if (no_iommu || dmar_disabled) { + /* + * We exit the function here to ensure IOMMU's remapping and + * mempool aren't setup, which means that the IOMMU's PMRs + * won't be disabled via the call to init_dmars(). So disable + * it explicitly here. The PMRs were setup by tboot prior to + * calling SENTER, but the kernel is expected to reset/tear + * down the PMRs. + */ + if (intel_iommu_tboot_noforce) { + for_each_iommu(iommu, drhd) + iommu_disable_protect_mem_regions(iommu); + } + + /* + * Make sure the IOMMUs are switched off, even when we + * boot into a kexec kernel and the previous kernel left + * them enabled + */ + intel_disable_iommus(); goto out_free_dmar; + } if (list_empty(&dmar_rmrr_units)) pr_info("No RMRR found\n"); diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ac596928f6b4..a190cbd76ef7 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu) size_t size; u64 irta; - if (!is_kdump_kernel()) { - pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", - iommu->name); - clear_ir_pre_enabled(iommu); - iommu_disable_irq_remapping(iommu); - return -EINVAL; - } - /* Check whether the old ir-table has the same size as ours */ irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK) @@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) init_ir_status(iommu); if (ir_pre_enabled(iommu)) { - if (iommu_load_old_irte(iommu)) + if (!is_kdump_kernel()) { + pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", + iommu->name); + clear_ir_pre_enabled(iommu); + iommu_disable_irq_remapping(iommu); + } else if (iommu_load_old_irte(iommu)) pr_err("Failed to copy IR table for %s from previous kernel\n", iommu->name); else diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f9bc6ebb8140..6e5df5e0a3bd 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -74,7 +74,7 @@ /* Calculate the block/page mapping size at level l for pagetable in d. 
*/ #define ARM_LPAE_BLOCK_SIZE(l,d) \ - (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ + (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) /* Page table bits */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3b67144dead2..cf7ca7e70777 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -36,6 +36,7 @@ static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); +static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; struct iommu_callback_data { const struct iommu_ops *ops; @@ -112,6 +113,18 @@ static int __iommu_attach_group(struct iommu_domain *domain, static void __iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group); +static int __init iommu_set_def_domain_type(char *str) +{ + bool pt; + + if (!str || strtobool(str, &pt)) + return -EINVAL; + + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; + return 0; +} +early_param("iommu.passthrough", iommu_set_def_domain_type); + static ssize_t iommu_group_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) { @@ -1015,10 +1028,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) * IOMMU driver. */ if (!group->default_domain) { - group->default_domain = __iommu_domain_alloc(dev->bus, - IOMMU_DOMAIN_DMA); + struct iommu_domain *dom; + + dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); + if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { + dev_warn(dev, + "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", + iommu_def_domain_type); + dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); + } + + group->default_domain = dom; if (!group->domain) - group->domain = group->default_domain; + group->domain = dom; } ret = iommu_group_add_device(group, dev); @@ -1083,8 +1105,12 @@ static int iommu_bus_notifier(struct notifier_block *nb, * result in ADD/DEL notifiers to group->notifier */ if (action == BUS_NOTIFY_ADD_DEVICE) { - if (ops->add_device) - return ops->add_device(dev); + if (ops->add_device) { + int ret; + + ret = ops->add_device(dev); + return (ret) ? NOTIFY_DONE : NOTIFY_OK; + } } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { if (ops->remove_device && dev->iommu_group) { ops->remove_device(dev); @@ -1652,6 +1678,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) } EXPORT_SYMBOL_GPL(iommu_domain_window_disable); +/** + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened + * @dev: the device where the fault has happened + * @iova: the faulting address + * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) + * + * This function should be called by the low-level IOMMU implementations + * whenever IOMMU faults happen, to allow high-level users, that are + * interested in such events, to know about them. + * + * This event may be useful for several possible use cases: + * - mere logging of the event + * - dynamic TLB/PTE loading + * - if restarting of the faulting device is required + * + * Returns 0 on success and an appropriate error code otherwise (if dynamic + * PTE/TLB loading will one day be supported, implementations will be able + * to tell whether it succeeded or not according to this return value). 
+ * + * Specifically, -ENOSYS is returned if a fault handler isn't installed + * (though fault handlers can also return -ENOSYS, in case they want to + * elicit the default behavior of the IOMMU drivers). + */ +int report_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags) +{ + int ret = -ENOSYS; + + /* + * if upper layers showed interest and installed a fault handler, + * invoke it. + */ + if (domain->handler) + ret = domain->handler(domain, dev, iova, flags, + domain->handler_token); + + trace_io_page_fault(dev, iova, flags); + return ret; +} +EXPORT_SYMBOL_GPL(report_iommu_fault); + static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index e80a4105ac2a..5c88ba70e4e0 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -166,7 +166,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, break; /* found a free slot */ } adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo - 1; + limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; move_left: prev = curr; curr = rb_prev(curr); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 19e010083408..a27ef570c328 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -431,9 +431,10 @@ err_release_mapping: static int mtk_iommu_add_device(struct device *dev) { - struct iommu_group *group; struct of_phandle_args iommu_spec; struct of_phandle_iterator it; + struct mtk_iommu_data *data; + struct iommu_group *group; int err; of_for_each_phandle(&it, err, dev->of_node, "iommus", @@ -450,6 +451,9 @@ static int mtk_iommu_add_device(struct device *dev) if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return -ENODEV; /* Not a iommu client device */ + data = dev->iommu_fwspec->iommu_priv; + iommu_device_link(&data->iommu, dev); + group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return PTR_ERR(group); @@ -460,9 +464,14 @@ static int mtk_iommu_add_device(struct device *dev) static void mtk_iommu_remove_device(struct device *dev) { + struct mtk_iommu_data *data; + if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return; + data = dev->iommu_fwspec->iommu_priv; + iommu_device_unlink(&data->iommu, dev); + iommu_group_remove_device(dev); iommu_fwspec_free(dev); } @@ -627,6 +636,17 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, + dev_name(&pdev->dev)); + if (ret) + return ret; + + iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); + + ret = iommu_device_register(&data->iommu); + if (ret) + return ret; + if (!iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); @@ -637,6 +657,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) { struct mtk_iommu_data *data = platform_get_drvdata(pdev); + iommu_device_sysfs_remove(&data->iommu); + iommu_device_unregister(&data->iommu); + if (iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, NULL); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 2683e9fc0dcf..9f44ee8ea1bc 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -96,6 +96,49 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, } EXPORT_SYMBOL_GPL(of_get_dma_window); +static bool of_iommu_driver_present(struct device_node *np) +{ + /* + * If the IOMMU still isn't ready by the time we 
reach init, assume + * it never will be. We don't want to defer indefinitely, nor attempt + * to dereference __iommu_of_table after it's been freed. + */ + if (system_state > SYSTEM_BOOTING) + return false; + + return of_match_node(&__iommu_of_table, np); +} + +static const struct iommu_ops +*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) +{ + const struct iommu_ops *ops; + struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; + int err; + + ops = iommu_ops_from_fwnode(fwnode); + if ((ops && !ops->of_xlate) || + (!ops && !of_iommu_driver_present(iommu_spec->np))) + return NULL; + + err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); + if (err) + return ERR_PTR(err); + /* + * The otherwise-empty fwspec handily serves to indicate the specific + * IOMMU device we're waiting for, which will be useful if we ever get + * a proper probe-ordering dependency mechanism in future. + */ + if (!ops) + return ERR_PTR(-EPROBE_DEFER); + + err = ops->of_xlate(dev, iommu_spec); + if (err) + return ERR_PTR(err); + + return ops; +} + static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) { struct of_phandle_args *iommu_spec = data; @@ -105,10 +148,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) } static const struct iommu_ops -*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) +*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np) { const struct iommu_ops *ops; struct of_phandle_args iommu_spec; + int err; /* * Start by tracing the RID alias down the PCI topology as @@ -123,56 +167,76 @@ static const struct iommu_ops * bus into the system beyond, and which IOMMU it ends up at. */ iommu_spec.np = NULL; - if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", - "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) - return NULL; + err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", + "iommu-map-mask", &iommu_spec.np, + iommu_spec.args); + if (err) + return err == -ENODEV ? NULL : ERR_PTR(err); - ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); - if (!ops || !ops->of_xlate || - iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || - ops->of_xlate(&pdev->dev, &iommu_spec)) - ops = NULL; + ops = of_iommu_xlate(&pdev->dev, &iommu_spec); of_node_put(iommu_spec.np); return ops; } -const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) +static const struct iommu_ops +*of_platform_iommu_init(struct device *dev, struct device_node *np) { struct of_phandle_args iommu_spec; - struct device_node *np; const struct iommu_ops *ops = NULL; int idx = 0; - if (dev_is_pci(dev)) - return of_pci_iommu_configure(to_pci_dev(dev), master_np); - /* * We don't currently walk up the tree looking for a parent IOMMU. 
* See the `Notes:' section of * Documentation/devicetree/bindings/iommu/iommu.txt */ - while (!of_parse_phandle_with_args(master_np, "iommus", - "#iommu-cells", idx, - &iommu_spec)) { - np = iommu_spec.np; - ops = iommu_ops_from_fwnode(&np->fwnode); - - if (!ops || !ops->of_xlate || - iommu_fwspec_init(dev, &np->fwnode, ops) || - ops->of_xlate(dev, &iommu_spec)) - goto err_put_node; - - of_node_put(np); + while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", + idx, &iommu_spec)) { + ops = of_iommu_xlate(dev, &iommu_spec); + of_node_put(iommu_spec.np); idx++; + if (IS_ERR_OR_NULL(ops)) + break; } return ops; +} + +const struct iommu_ops *of_iommu_configure(struct device *dev, + struct device_node *master_np) +{ + const struct iommu_ops *ops; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + + if (!master_np) + return NULL; + + if (fwspec) { + if (fwspec->ops) + return fwspec->ops; + + /* In the deferred case, start again from scratch */ + iommu_fwspec_free(dev); + } -err_put_node: - of_node_put(np); - return NULL; + if (dev_is_pci(dev)) + ops = of_pci_iommu_init(to_pci_dev(dev), master_np); + else + ops = of_platform_iommu_init(dev, master_np); + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. + */ + if (!IS_ERR_OR_NULL(ops) && ops->add_device && + dev->bus && !dev->iommu_group) { + int err = ops->add_device(dev); + + if (err) + ops = ERR_PTR(err); + } + + return ops; } static int __init of_iommu_init(void) @@ -183,7 +247,7 @@ static int __init of_iommu_init(void) for_each_matching_node_and_match(np, matches, &match) { const of_iommu_init_fn init_fn = match->data; - if (init_fn(np)) + if (init_fn && init_fn(np)) pr_err("Failed to initialise IOMMU %s\n", of_node_full_name(np)); } diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e2583cce2cc1..95dfca36ccb9 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -36,28 +36,14 @@ #include "omap-iopgtable.h" #include "omap-iommu.h" +static const struct iommu_ops omap_iommu_ops; + #define to_iommu(dev) \ ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) /* bitmap of the page sizes currently supported */ #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) -/** - * struct omap_iommu_domain - omap iommu domain - * @pgtable: the page table - * @iommu_dev: an omap iommu device attached to this domain. only a single - * iommu device can be attached for now. - * @dev: Device using this domain. 
- * @lock: domain lock, should be taken when attaching/detaching - */ -struct omap_iommu_domain { - u32 *pgtable; - struct omap_iommu *iommu_dev; - struct device *dev; - spinlock_t lock; - struct iommu_domain domain; -}; - #define MMU_LOCK_BASE_SHIFT 10 #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) #define MMU_LOCK_BASE(x) \ @@ -818,33 +804,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) return IRQ_NONE; } -static int device_match_by_alias(struct device *dev, void *data) -{ - struct omap_iommu *obj = to_iommu(dev); - const char *name = data; - - pr_debug("%s: %s %s\n", __func__, obj->name, name); - - return strcmp(obj->name, name) == 0; -} - /** * omap_iommu_attach() - attach iommu device to an iommu domain - * @name: name of target omap iommu device + * @obj: target omap iommu device * @iopgd: page table **/ -static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) +static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) { int err; - struct device *dev; - struct omap_iommu *obj; - - dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, - device_match_by_alias); - if (!dev) - return ERR_PTR(-ENODEV); - - obj = to_iommu(dev); spin_lock(&obj->iommu_lock); @@ -857,11 +824,13 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) spin_unlock(&obj->iommu_lock); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); - return obj; + + return 0; err_enable: spin_unlock(&obj->iommu_lock); - return ERR_PTR(err); + + return err; } /** @@ -928,28 +897,26 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; + if (!of) { + pr_err("%s: only DT-based devices are supported\n", __func__); + return -ENODEV; + } + obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); if (!obj) return -ENOMEM; - if (of) { - obj->name = dev_name(&pdev->dev); - obj->nr_tlb_entries = 32; - err = of_property_read_u32(of, "ti,#tlb-entries", - &obj->nr_tlb_entries); - if (err && err != -EINVAL) - return err; - if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) - return -EINVAL; - if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) - obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; - } else { - obj->nr_tlb_entries = pdata->nr_tlb_entries; - obj->name = pdata->name; - } + obj->name = dev_name(&pdev->dev); + obj->nr_tlb_entries = 32; + err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); + if (err && err != -EINVAL) + return err; + if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) + return -EINVAL; + if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) + obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; obj->dev = &pdev->dev; obj->ctx = (void *)obj + sizeof(*obj); @@ -976,19 +943,46 @@ static int omap_iommu_probe(struct platform_device *pdev) return err; platform_set_drvdata(pdev, obj); + obj->group = iommu_group_alloc(); + if (IS_ERR(obj->group)) + return PTR_ERR(obj->group); + + err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name); + if (err) + goto out_group; + + iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); + + err = iommu_device_register(&obj->iommu); + if (err) + goto out_sysfs; + pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); dev_info(&pdev->dev, "%s registered\n", obj->name); + return 0; + +out_sysfs: + 
iommu_device_sysfs_remove(&obj->iommu); +out_group: + iommu_group_put(obj->group); + return err; } static int omap_iommu_remove(struct platform_device *pdev) { struct omap_iommu *obj = platform_get_drvdata(pdev); + iommu_group_put(obj->group); + obj->group = NULL; + + iommu_device_sysfs_remove(&obj->iommu); + iommu_device_unregister(&obj->iommu); + omap_iommu_debugfs_remove(obj); pm_runtime_disable(obj->dev); @@ -1077,11 +1071,11 @@ static int omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); - struct omap_iommu *oiommu; struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu *oiommu; int ret = 0; - if (!arch_data || !arch_data->name) { + if (!arch_data || !arch_data->iommu_dev) { dev_err(dev, "device doesn't have an associated iommu\n"); return -EINVAL; } @@ -1095,15 +1089,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) goto out; } + oiommu = arch_data->iommu_dev; + /* get a handle to and enable the omap iommu */ - oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable); - if (IS_ERR(oiommu)) { - ret = PTR_ERR(oiommu); + ret = omap_iommu_attach(oiommu, omap_domain->pgtable); + if (ret) { dev_err(dev, "can't get omap iommu: %d\n", ret); goto out; } - omap_domain->iommu_dev = arch_data->iommu_dev = oiommu; + omap_domain->iommu_dev = oiommu; omap_domain->dev = dev; oiommu->domain = domain; @@ -1116,7 +1111,6 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, struct device *dev) { struct omap_iommu *oiommu = dev_to_omap_iommu(dev); - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; /* only a single device is supported per domain for now */ if (omap_domain->iommu_dev != oiommu) { @@ -1128,7 +1122,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_iommu_detach(oiommu); - omap_domain->iommu_dev = arch_data->iommu_dev = NULL; + omap_domain->iommu_dev = NULL; omap_domain->dev = NULL; oiommu->domain = NULL; } @@ -1232,8 +1226,11 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, static int omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data; + struct omap_iommu *oiommu; + struct iommu_group *group; struct device_node *np; struct platform_device *pdev; + int ret; /* * Allocate the archdata iommu structure for DT-based devices. 
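/*
 * A minimal sketch of the registration sequence adopted in the
 * omap_iommu_probe() hunk above (the rockchip-iommu hunk further down
 * follows the same iommu_device pattern). This is an editorial
 * illustration, not code from the patch: "struct example_iommu" and
 * example_iommu_register() are placeholder names, and only calls already
 * visible in the diff are used.
 */
struct example_iommu {
	struct device *dev;
	struct iommu_device iommu;
	struct iommu_group *group;
};

static int example_iommu_register(struct example_iommu *obj,
				  const struct iommu_ops *ops)
{
	int err;

	/* One group per IOMMU instance, handed out via .device_group. */
	obj->group = iommu_group_alloc();
	if (IS_ERR(obj->group))
		return PTR_ERR(obj->group);

	/* Expose the instance under /sys/class/iommu. */
	err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
				     dev_name(obj->dev));
	if (err)
		goto err_group;

	/* Attach the ops and register with the IOMMU core. */
	iommu_device_set_ops(&obj->iommu, ops);
	err = iommu_device_register(&obj->iommu);
	if (err)
		goto err_sysfs;

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
err_group:
	iommu_group_put(obj->group);
	return err;
}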
@@ -1254,15 +1251,41 @@ static int omap_iommu_add_device(struct device *dev) return -EINVAL; } + oiommu = platform_get_drvdata(pdev); + if (!oiommu) { + of_node_put(np); + return -EINVAL; + } + arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); if (!arch_data) { of_node_put(np); return -ENOMEM; } - arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL); + ret = iommu_device_link(&oiommu->iommu, dev); + if (ret) { + kfree(arch_data); + of_node_put(np); + return ret; + } + + arch_data->iommu_dev = oiommu; dev->archdata.iommu = arch_data; + /* + * IOMMU group initialization calls into omap_iommu_device_group, which + * needs a valid dev->archdata.iommu pointer + */ + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) { + iommu_device_unlink(&oiommu->iommu, dev); + dev->archdata.iommu = NULL; + kfree(arch_data); + return PTR_ERR(group); + } + iommu_group_put(group); + of_node_put(np); return 0; @@ -1275,8 +1298,23 @@ static void omap_iommu_remove_device(struct device *dev) if (!dev->of_node || !arch_data) return; - kfree(arch_data->name); + iommu_device_unlink(&arch_data->iommu_dev->iommu, dev); + iommu_group_remove_device(dev); + + dev->archdata.iommu = NULL; kfree(arch_data); + +} + +static struct iommu_group *omap_iommu_device_group(struct device *dev) +{ + struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct iommu_group *group = NULL; + + if (arch_data->iommu_dev) + group = arch_data->iommu_dev->group; + + return group; } static const struct iommu_ops omap_iommu_ops = { @@ -1290,6 +1328,7 @@ static const struct iommu_ops omap_iommu_ops = { .iova_to_phys = omap_iommu_iova_to_phys, .add_device = omap_iommu_add_device, .remove_device = omap_iommu_remove_device, + .device_group = omap_iommu_device_group, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, }; @@ -1299,6 +1338,7 @@ static int __init omap_iommu_init(void) const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ struct device_node *np; + int ret; np = of_find_matching_node(NULL, omap_iommu_of_match); if (!np) @@ -1312,11 +1352,25 @@ static int __init omap_iommu_init(void) return -ENOMEM; iopte_cachep = p; - bus_set_iommu(&platform_bus_type, &omap_iommu_ops); - omap_iommu_debugfs_init(); - return platform_driver_register(&omap_iommu_driver); + ret = platform_driver_register(&omap_iommu_driver); + if (ret) { + pr_err("%s: failed to register driver\n", __func__); + goto fail_driver; + } + + ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + if (ret) + goto fail_bus; + + return 0; + +fail_bus: + platform_driver_unregister(&omap_iommu_driver); +fail_driver: + kmem_cache_destroy(iopte_cachep); + return ret; } subsys_initcall(omap_iommu_init); /* must be ready before omap3isp is probed */ diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 59628e5017b4..6e70515e6038 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -14,6 +14,7 @@ #define _OMAP_IOMMU_H #include <linux/bitops.h> +#include <linux/iommu.h> #define for_each_iotlb_cr(obj, n, __i, cr) \ for (__i = 0; \ @@ -27,6 +28,23 @@ struct iotlb_entry { u32 endian, elsz, mixed; }; +/** + * struct omap_iommu_domain - omap iommu domain + * @pgtable: the page table + * @iommu_dev: an omap iommu device attached to this domain. only a single + * iommu device can be attached for now. + * @dev: Device using this domain. 
+ * @lock: domain lock, should be taken when attaching/detaching + * @domain: generic domain handle used by iommu core code + */ +struct omap_iommu_domain { + u32 *pgtable; + struct omap_iommu *iommu_dev; + struct device *dev; + spinlock_t lock; + struct iommu_domain domain; +}; + struct omap_iommu { const char *name; void __iomem *regbase; @@ -50,6 +68,22 @@ struct omap_iommu { int has_bus_err_back; u32 id; + + struct iommu_device iommu; + struct iommu_group *group; +}; + +/** + * struct omap_iommu_arch_data - omap iommu private data + * @iommu_dev: handle of the iommu device + * + * This is an omap iommu private data object, which binds an iommu user + * to its iommu device. This object should be placed at the iommu user's + * dev_archdata so generic IOMMU API can be used without having to + * utilize omap-specific plumbing anymore. + */ +struct omap_iommu_arch_data { + struct omap_iommu *iommu_dev; }; struct cr_regs { diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 9afcbf79f0b0..4ba48a26b389 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -8,6 +8,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-iommu.h> +#include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -90,6 +91,7 @@ struct rk_iommu { void __iomem **bases; int num_mmu; int irq; + struct iommu_device iommu; struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ }; @@ -1032,6 +1034,7 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group, static int rk_iommu_add_device(struct device *dev) { struct iommu_group *group; + struct rk_iommu *iommu; int ret; if (!rk_iommu_is_dev_iommu_master(dev)) @@ -1054,6 +1057,10 @@ static int rk_iommu_add_device(struct device *dev) if (ret) goto err_remove_device; + iommu = rk_iommu_from_dev(dev); + if (iommu) + iommu_device_link(&iommu->iommu, dev); + iommu_group_put(group); return 0; @@ -1067,9 +1074,15 @@ err_put_group: static void rk_iommu_remove_device(struct device *dev) { + struct rk_iommu *iommu; + if (!rk_iommu_is_dev_iommu_master(dev)) return; + iommu = rk_iommu_from_dev(dev); + if (iommu) + iommu_device_unlink(&iommu->iommu, dev); + iommu_group_remove_device(dev); } @@ -1117,7 +1130,7 @@ static int rk_iommu_probe(struct platform_device *pdev) struct rk_iommu *iommu; struct resource *res; int num_res = pdev->num_resources; - int i; + int err, i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -1150,11 +1163,25 @@ static int rk_iommu_probe(struct platform_device *pdev) return -ENXIO; } - return 0; + err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); + if (err) + return err; + + iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); + err = iommu_device_register(&iommu->iommu); + + return err; } static int rk_iommu_remove(struct platform_device *pdev) { + struct rk_iommu *iommu = platform_get_drvdata(pdev); + + if (iommu) { + iommu_device_sysfs_remove(&iommu->iommu); + iommu_device_unregister(&iommu->iommu); + } + return 0; } diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 9305964250ac..eeb19f560a05 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -15,6 +15,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/dma-mapping.h> #include <soc/tegra/ahb.h> #include <soc/tegra/mc.h> diff --git 
a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c index 31ef8130a87f..54e871a47387 100644 --- a/drivers/isdn/hardware/avm/b1isa.c +++ b/drivers/isdn/hardware/avm/b1isa.c @@ -169,8 +169,8 @@ static struct pci_dev isa_dev[MAX_CARDS]; static int io[MAX_CARDS]; static int irq[MAX_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c index 72ef18853951..9516203c735f 100644 --- a/drivers/isdn/hardware/avm/t1isa.c +++ b/drivers/isdn/hardware/avm/t1isa.c @@ -516,8 +516,8 @@ static int io[MAX_CARDS]; static int irq[MAX_CARDS]; static int cardnr[MAX_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(cardnr, int, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 2d12c6ceeb89..c7d68675b028 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -350,13 +350,13 @@ MODULE_AUTHOR("Karsten Keil"); MODULE_LICENSE("GPL"); module_param_array(type, int, NULL, 0); module_param_array(protocol, int, NULL, 0); -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); +module_param_hw_array(mem, int, iomem, NULL, 0); module_param(id, charp, 0); #ifdef IO0_IO1 -module_param_array(io0, int, NULL, 0); -module_param_array(io1, int, NULL, 0); +module_param_hw_array(io0, int, ioport, NULL, 0); +module_param_hw_array(io1, int, ioport, NULL, 0); #endif #endif /* MODULE */ diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 5266755add63..4680f001653a 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -69,7 +69,7 @@ MODULE_PARM_DESC(card, "Card type"); */ static unsigned long vidmem; /* default = 0 - Video memory base address */ -module_param(vidmem, ulong, 0444); +module_param_hw(vidmem, ulong, iomem, 0444); MODULE_PARM_DESC(vidmem, "Default video memory base address"); /* diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index aa44e11decca..853d598937f6 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -23,6 +23,7 @@ #include <linux/of_reserved_mem.h> #include <linux/sched.h> #include <linux/sizes.h> +#include <linux/dma-mapping.h> #include "mtk_vpu.h" diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 084ecf4aa9a4..0d984a28a003 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1943,30 +1943,13 @@ static void isp_detach_iommu(struct isp_device *isp) { arm_iommu_release_mapping(isp->mapping); isp->mapping = NULL; - iommu_group_remove_device(isp->dev); } static int isp_attach_iommu(struct isp_device *isp) { struct dma_iommu_mapping *mapping; - struct iommu_group *group; int ret; - /* Create a device group and add the device to it. 
*/ - group = iommu_group_alloc(); - if (IS_ERR(group)) { - dev_err(isp->dev, "failed to allocate IOMMU group\n"); - return PTR_ERR(group); - } - - ret = iommu_group_add_device(group, isp->dev); - iommu_group_put(group); - - if (ret < 0) { - dev_err(isp->dev, "failed to add device to IPMMU group\n"); - return ret; - } - /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h index 7e6f6638433b..2f2ae609c548 100644 --- a/drivers/media/platform/omap3isp/isp.h +++ b/drivers/media/platform/omap3isp/isp.h @@ -23,7 +23,6 @@ #include <linux/clk-provider.h> #include <linux/device.h> #include <linux/io.h> -#include <linux/iommu.h> #include <linux/platform_device.h> #include <linux/wait.h> diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 2f0a0d248936..77d5d4cbed0a 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -833,11 +833,11 @@ MODULE_LICENSE("GPL"); module_param(type, int, 0444); MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo, 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug"); -module_param(io, int, 0444); +module_param_hw(io, int, ioport, 0444); MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)"); /* some architectures (e.g. intel xscale) have memory mapped registers */ -module_param(iommap, ulong, 0444); +module_param_hw(iommap, ulong, other, 0444); MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)"); /* @@ -845,13 +845,13 @@ MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory map * on 32bit word boundaries. * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out() */ -module_param(ioshift, int, 0444); +module_param_hw(ioshift, int, other, 0444); MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)"); -module_param(irq, int, 0444); +module_param_hw(irq, int, irq, 0444); MODULE_PARM_DESC(irq, "Interrupt (4 or 3)"); -module_param(share_irq, bool, 0444); +module_param_hw(share_irq, bool, other, 0444); MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)"); module_param(sense, int, 0444); diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 3ecc429297a0..ffc350258041 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -116,7 +116,7 @@ config FSL_CORENET_CF config FSL_IFC bool - depends on FSL_SOC || ARCH_LAYERSCAPE + depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A config JZ4780_NEMC bool "Ingenic JZ4780 SoC NEMC driver" diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c index acbbe0390be4..76a1015d5783 100644 --- a/drivers/misc/dummy-irq.c +++ b/drivers/misc/dummy-irq.c @@ -59,6 +59,6 @@ module_exit(dummy_irq_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jiri Kosina"); -module_param(irq, uint, 0444); +module_param_hw(irq, uint, irq, 0444); MODULE_PARM_DESC(irq, "The IRQ to register for"); MODULE_DESCRIPTION("Dummy IRQ handler driver"); diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index c2e29d7f0de8..a341938c7e2c 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -278,7 +278,7 @@ static void vop_del_vqs(struct virtio_device *dev) static struct virtqueue *vop_find_vq(struct virtio_device *dev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct _vop_vdev *vdev = to_vopvdev(dev); struct 
vop_device *vpdev = vdev->vpdev; @@ -314,6 +314,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, dev, false, + ctx, (void __force *)va, vop_notify, callback, name); if (!vq) { err = -ENOMEM; @@ -374,7 +375,8 @@ unmap: static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; @@ -388,7 +390,8 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, for (i = 0; i < nvqs; ++i) { dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", __func__, i, names[i]); - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]); + vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error; diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index bd04e8bae010..e15a9733fcfd 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -2001,11 +2001,11 @@ static void __exit wbsd_drv_exit(void) module_init(wbsd_drv_init); module_exit(wbsd_drv_exit); #ifdef CONFIG_PNP -module_param_named(nopnp, param_nopnp, uint, 0444); +module_param_hw_named(nopnp, param_nopnp, uint, other, 0444); #endif -module_param_named(io, param_io, uint, 0444); -module_param_named(irq, param_irq, uint, 0444); -module_param_named(dma, param_dma, int, 0444); +module_param_hw_named(io, param_io, uint, ioport, 0444); +module_param_hw_named(irq, param_irq, uint, irq, 0444); +module_param_hw_named(dma, param_dma, int, dma, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 9dca881bb378..56aa6b75213d 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -323,7 +323,8 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) * it should report a size of 8KBytes (0x0020*256). */ cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; - pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); + pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", + mtd->name); } static void fixup_s29gl064n_sectors(struct mtd_info *mtd) @@ -333,7 +334,8 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd) if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { cfi->cfiq->EraseRegionInfo[0] |= 0x0040; - pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); + pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", + mtd->name); } } @@ -344,7 +346,8 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd) if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; - pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); + pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", + mtd->name); } } @@ -358,7 +361,8 @@ static void fixup_s29ns512p_sectors(struct mtd_info *mtd) * which is not permitted by CFI. 
*/ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; - pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); + pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", + mtd->name); } /* Used to fix CFI-Tables of chips without Extended Query Tables */ diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index aef1846b4de2..5a09a72ab112 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -17,12 +17,10 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o obj-$(CONFIG_MTD_PHYSMAP) += physmap.o -ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE -physmap_of-objs += physmap_of_versatile.o -endif -ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI -physmap_of-objs += physmap_of_gemini.o -endif +physmap_of-objs-y += physmap_of_core.o +physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_VERSATILE) += physmap_of_versatile.o +physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_GEMINI) += physmap_of_gemini.o +physmap_of-objs := $(physmap_of-objs-y) obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o obj-$(CONFIG_MTD_PISMO) += pismo.o obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of_core.c index 14e8909c9955..62fa6836f218 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of_core.c @@ -116,32 +116,22 @@ static const char * const part_probe_types_def[] = { static const char * const *of_get_probes(struct device_node *dp) { - const char *cp; - int cplen; - unsigned int l; - unsigned int count; const char **res; + int count; - cp = of_get_property(dp, "linux,part-probe", &cplen); - if (cp == NULL) + count = of_property_count_strings(dp, "linux,part-probe"); + if (count < 0) return part_probe_types_def; - count = 0; - for (l = 0; l != cplen; l++) - if (cp[l] == 0) - count++; - - res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL); + res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL); if (!res) return NULL; - count = 0; - while (cplen > 0) { - res[count] = cp; - l = strlen(cp) + 1; - cp += l; - cplen -= l; - count++; - } + + count = of_property_read_string_array(dp, "linux,part-probe", res, + count); + if (count < 0) + return NULL; + return res; } diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index c40e2c951758..f12879a3d4ff 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -1235,10 +1235,8 @@ static int mtdswap_show(struct seq_file *s, void *data) if (root->rb_node) { count[i] = d->trees[i].count; - min[i] = rb_entry(rb_first(root), struct swap_eb, - rb)->erase_count; - max[i] = rb_entry(rb_last(root), struct swap_eb, - rb)->erase_count; + min[i] = MTDSWAP_ECNT_MIN(root); + max[i] = MTDSWAP_ECNT_MAX(root); } else count[i] = 0; } diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 6d4d5672d1d8..c3029528063b 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -13,7 +13,6 @@ config MTD_NAND_ECC_SMC menuconfig MTD_NAND tristate "NAND Device Support" depends on MTD - select MTD_NAND_IDS select MTD_NAND_ECC help This enables support for accessing all type of NAND flash @@ -60,17 +59,6 @@ config MTD_NAND_DENALI_DT Enable the driver for NAND flash on platforms using a Denali NAND controller as a DT device. 
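/*
 * Sketch (editorial, not from the patch) of the devicetree string-array
 * helpers that the rewritten of_get_probes() in physmap_of_core.c above
 * relies on; both come from <linux/of.h>. The function name and fixed-size
 * buffer are placeholders; the "linux,part-probe" property is the one
 * handled in that hunk.
 */
static int example_read_part_probes(struct device_node *np)
{
	const char *probes[8];
	int count, i;

	/* Number of strings in the property, or a negative error code. */
	count = of_property_count_strings(np, "linux,part-probe");
	if (count < 0)
		return count;

	if (count > ARRAY_SIZE(probes))
		count = ARRAY_SIZE(probes);

	/* Fills probes[] with pointers into the property value. */
	count = of_property_read_string_array(np, "linux,part-probe",
					      probes, count);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++)
		pr_info("partition parser: %s\n", probes[i]);

	return 0;
}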
-config MTD_NAND_DENALI_SCRATCH_REG_ADDR - hex "Denali NAND size scratch register address" - default "0xFF108018" - depends on MTD_NAND_DENALI_PCI - help - Some platforms place the NAND chip size in a scratch register - because (some versions of) the driver aren't able to automatically - determine the size of certain chips. Set the address of the - scratch register here to enable this feature. On Intel Moorestown - boards, the scratch register is at 0xFF108018. - config MTD_NAND_GPIO tristate "GPIO assisted NAND Flash driver" depends on GPIOLIB || COMPILE_TEST @@ -109,9 +97,6 @@ config MTD_NAND_OMAP_BCH config MTD_NAND_OMAP_BCH_BUILD def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH -config MTD_NAND_IDS - tristate - config MTD_NAND_RICOH tristate "Ricoh xD card reader" default n @@ -321,11 +306,11 @@ config MTD_NAND_CS553X If you say "m", the module will be called cs553x_nand. config MTD_NAND_ATMEL - tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32" - depends on ARCH_AT91 || AVR32 + tristate "Support for NAND Flash / SmartMedia on AT91" + depends on ARCH_AT91 help Enables support for NAND Flash / Smart Media Card interface - on Atmel AT91 and AVR32 processors. + on Atmel AT91 processors. config MTD_NAND_PXA3xx tristate "NAND support on PXA3xx and Armada 370/XP" @@ -443,7 +428,7 @@ config MTD_NAND_FSL_ELBC config MTD_NAND_FSL_IFC tristate "NAND support for Freescale IFC controller" - depends on FSL_SOC || ARCH_LAYERSCAPE + depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A select FSL_IFC select MEMORY help diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 19a66e404d5b..ade5fc4c3819 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_MTD_NAND) += nand.o obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o -obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o @@ -25,7 +24,7 @@ obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o -obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o +obj-$(CONFIG_MTD_NAND_ATMEL) += atmel/ obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o omap2_nand-objs := omap2.o obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o @@ -61,4 +60,10 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o -nand-objs := nand_base.o nand_bbt.o nand_timings.o +nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o +nand-objs += nand_amd.o +nand-objs += nand_hynix.o +nand-objs += nand_macronix.o +nand-objs += nand_micron.o +nand-objs += nand_samsung.o +nand-objs += nand_toshiba.o diff --git a/drivers/mtd/nand/atmel/Makefile b/drivers/mtd/nand/atmel/Makefile new file mode 100644 index 000000000000..288db4f38a8f --- /dev/null +++ b/drivers/mtd/nand/atmel/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_MTD_NAND_ATMEL) += atmel-nand-controller.o atmel-pmecc.o + +atmel-nand-controller-objs := nand-controller.o +atmel-pmecc-objs := pmecc.o diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c new file mode 100644 index 000000000000..3b2446896147 --- /dev/null +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -0,0 +1,2197 @@ +/* + * Copyright 2017 ATMEL + * Copyright 2017 Free Electrons + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * Derived from the 
atmel_nand.c driver which contained the following + * copyrights: + * + * Copyright 2003 Rick Bronson + * + * Derived from drivers/mtd/nand/autcpu12.c + * Copyright 2001 Thomas Gleixner (gleixner@autronix.de) + * + * Derived from drivers/mtd/spia.c + * Copyright 2000 Steven J. Hill (sjhill@cotw.com) + * + * + * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 + * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007 + * + * Derived from Das U-Boot source code + * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) + * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas + * + * Add Programmable Multibit ECC support for various AT91 SoC + * Copyright 2012 ATMEL, Hong Xu + * + * Add Nand Flash Controller support for SAMA5 SoC + * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * A few words about the naming convention in this file. This convention + * applies to structure and function names. + * + * Prefixes: + * + * - atmel_nand_: all generic structures/functions + * - atmel_smc_nand_: all structures/functions specific to the SMC interface + * (at91sam9 and avr32 SoCs) + * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface + * (sama5 SoCs and later) + * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block + * that is available in the HSMC block + * - <soc>_nand_: all SoC specific structures/functions + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/genalloc.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/atmel-matrix.h> +#include <linux/module.h> +#include <linux/mtd/nand.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/iopoll.h> +#include <linux/platform_device.h> +#include <linux/platform_data/atmel.h> +#include <linux/regmap.h> + +#include "pmecc.h" + +#define ATMEL_HSMC_NFC_CFG 0x0 +#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24) +#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24) +#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20)) +#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16) +#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13) +#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12) +#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9) +#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8) +#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0) +#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1) + +#define ATMEL_HSMC_NFC_CTRL 0x4 +#define ATMEL_HSMC_NFC_CTRL_EN BIT(0) +#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1) + +#define ATMEL_HSMC_NFC_SR 0x8 +#define ATMEL_HSMC_NFC_IER 0xc +#define ATMEL_HSMC_NFC_IDR 0x10 +#define ATMEL_HSMC_NFC_IMR 0x14 +#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1) +#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4) +#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5) +#define ATMEL_HSMC_NFC_SR_BUSY BIT(8) +#define ATMEL_HSMC_NFC_SR_WR BIT(11) +#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12) +#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16) +#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17) +#define ATMEL_HSMC_NFC_SR_DTOE BIT(20) +#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21) +#define ATMEL_HSMC_NFC_SR_AWB BIT(22) +#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23) +#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \ + 
ATMEL_HSMC_NFC_SR_UNDEF | \ + ATMEL_HSMC_NFC_SR_AWB | \ + ATMEL_HSMC_NFC_SR_NFCASE) +#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24) + +#define ATMEL_HSMC_NFC_ADDR 0x18 +#define ATMEL_HSMC_NFC_BANK 0x1c + +#define ATMEL_NFC_MAX_RB_ID 7 + +#define ATMEL_NFC_SRAM_SIZE 0x2400 + +#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2)) +#define ATMEL_NFC_VCMD2 BIT(18) +#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19) +#define ATMEL_NFC_CSID(cs) ((cs) << 22) +#define ATMEL_NFC_DATAEN BIT(25) +#define ATMEL_NFC_NFCWR BIT(26) + +#define ATMEL_NFC_MAX_ADDR_CYCLES 5 + +#define ATMEL_NAND_ALE_OFFSET BIT(21) +#define ATMEL_NAND_CLE_OFFSET BIT(22) + +#define DEFAULT_TIMEOUT_MS 1000 +#define MIN_DMA_LEN 128 + +enum atmel_nand_rb_type { + ATMEL_NAND_NO_RB, + ATMEL_NAND_NATIVE_RB, + ATMEL_NAND_GPIO_RB, +}; + +struct atmel_nand_rb { + enum atmel_nand_rb_type type; + union { + struct gpio_desc *gpio; + int id; + }; +}; + +struct atmel_nand_cs { + int id; + struct atmel_nand_rb rb; + struct gpio_desc *csgpio; + struct { + void __iomem *virt; + dma_addr_t dma; + } io; +}; + +struct atmel_nand { + struct list_head node; + struct device *dev; + struct nand_chip base; + struct atmel_nand_cs *activecs; + struct atmel_pmecc_user *pmecc; + struct gpio_desc *cdgpio; + int numcs; + struct atmel_nand_cs cs[]; +}; + +static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip) +{ + return container_of(chip, struct atmel_nand, base); +} + +enum atmel_nfc_data_xfer { + ATMEL_NFC_NO_DATA, + ATMEL_NFC_READ_DATA, + ATMEL_NFC_WRITE_DATA, +}; + +struct atmel_nfc_op { + u8 cs; + u8 ncmds; + u8 cmds[2]; + u8 naddrs; + u8 addrs[5]; + enum atmel_nfc_data_xfer data; + u32 wait; + u32 errors; +}; + +struct atmel_nand_controller; +struct atmel_nand_controller_caps; + +struct atmel_nand_controller_ops { + int (*probe)(struct platform_device *pdev, + const struct atmel_nand_controller_caps *caps); + int (*remove)(struct atmel_nand_controller *nc); + void (*nand_init)(struct atmel_nand_controller *nc, + struct atmel_nand *nand); + int (*ecc_init)(struct atmel_nand *nand); +}; + +struct atmel_nand_controller_caps { + bool has_dma; + bool legacy_of_bindings; + u32 ale_offs; + u32 cle_offs; + const struct atmel_nand_controller_ops *ops; +}; + +struct atmel_nand_controller { + struct nand_hw_control base; + const struct atmel_nand_controller_caps *caps; + struct device *dev; + struct regmap *smc; + struct dma_chan *dmac; + struct atmel_pmecc *pmecc; + struct list_head chips; + struct clk *mck; +}; + +static inline struct atmel_nand_controller * +to_nand_controller(struct nand_hw_control *ctl) +{ + return container_of(ctl, struct atmel_nand_controller, base); +} + +struct atmel_smc_nand_controller { + struct atmel_nand_controller base; + struct regmap *matrix; + unsigned int ebi_csa_offs; +}; + +static inline struct atmel_smc_nand_controller * +to_smc_nand_controller(struct nand_hw_control *ctl) +{ + return container_of(to_nand_controller(ctl), + struct atmel_smc_nand_controller, base); +} + +struct atmel_hsmc_nand_controller { + struct atmel_nand_controller base; + struct { + struct gen_pool *pool; + void __iomem *virt; + dma_addr_t dma; + } sram; + struct regmap *io; + struct atmel_nfc_op op; + struct completion complete; + int irq; + + /* Only used when instantiating from legacy DT bindings. 
*/ + struct clk *clk; +}; + +static inline struct atmel_hsmc_nand_controller * +to_hsmc_nand_controller(struct nand_hw_control *ctl) +{ + return container_of(to_nand_controller(ctl), + struct atmel_hsmc_nand_controller, base); +} + +static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status) +{ + op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS; + op->wait ^= status & op->wait; + + return !op->wait || op->errors; +} + +static irqreturn_t atmel_nfc_interrupt(int irq, void *data) +{ + struct atmel_hsmc_nand_controller *nc = data; + u32 sr, rcvd; + bool done; + + regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr); + + rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS); + done = atmel_nfc_op_done(&nc->op, sr); + + if (rcvd) + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd); + + if (done) + complete(&nc->complete); + + return rcvd ? IRQ_HANDLED : IRQ_NONE; +} + +static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll, + unsigned int timeout_ms) +{ + int ret; + + if (!timeout_ms) + timeout_ms = DEFAULT_TIMEOUT_MS; + + if (poll) { + u32 status; + + ret = regmap_read_poll_timeout(nc->base.smc, + ATMEL_HSMC_NFC_SR, status, + atmel_nfc_op_done(&nc->op, + status), + 0, timeout_ms * 1000); + } else { + init_completion(&nc->complete); + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER, + nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS); + ret = wait_for_completion_timeout(&nc->complete, + msecs_to_jiffies(timeout_ms)); + if (!ret) + ret = -ETIMEDOUT; + else + ret = 0; + + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff); + } + + if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) { + dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n"); + ret = -ETIMEDOUT; + } + + if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) { + dev_err(nc->base.dev, "Access to an undefined area\n"); + ret = -EIO; + } + + if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) { + dev_err(nc->base.dev, "Access while busy\n"); + ret = -EIO; + } + + if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) { + dev_err(nc->base.dev, "Wrong access size\n"); + ret = -EIO; + } + + return ret; +} + +static void atmel_nand_dma_transfer_finished(void *data) +{ + struct completion *finished = data; + + complete(finished); +} + +static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc, + void *buf, dma_addr_t dev_dma, size_t len, + enum dma_data_direction dir) +{ + DECLARE_COMPLETION_ONSTACK(finished); + dma_addr_t src_dma, dst_dma, buf_dma; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + + buf_dma = dma_map_single(nc->dev, buf, len, dir); + if (dma_mapping_error(nc->dev, dev_dma)) { + dev_err(nc->dev, + "Failed to prepare a buffer for DMA access\n"); + goto err; + } + + if (dir == DMA_FROM_DEVICE) { + src_dma = dev_dma; + dst_dma = buf_dma; + } else { + src_dma = buf_dma; + dst_dma = dev_dma; + } + + tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(nc->dev, "Failed to prepare DMA memcpy\n"); + goto err_unmap; + } + + tx->callback = atmel_nand_dma_transfer_finished; + tx->callback_param = &finished; + + cookie = dmaengine_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(nc->dev, "Failed to do DMA tx_submit\n"); + goto err_unmap; + } + + dma_async_issue_pending(nc->dmac); + wait_for_completion(&finished); + + return 0; + +err_unmap: + dma_unmap_single(nc->dev, buf_dma, len, dir); + +err: + dev_dbg(nc->dev, "Fall back to CPU I/O\n"); + + return -EIO; +} + +static u8 atmel_nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *chip = 
mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + + return ioread8(nand->activecs->io.virt); +} + +static u16 atmel_nand_read_word(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + + return ioread16(nand->activecs->io.virt); +} + +static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + + if (chip->options & NAND_BUSWIDTH_16) + iowrite16(byte | (byte << 8), nand->activecs->io.virt); + else + iowrite8(byte, nand->activecs->io.virt); +} + +static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + + nc = to_nand_controller(chip->controller); + + /* + * If the controller supports DMA, the buffer address is DMA-able and + * len is long enough to make DMA transfers profitable, let's trigger + * a DMA transfer. If it fails, fallback to PIO mode. + */ + if (nc->dmac && virt_addr_valid(buf) && + len >= MIN_DMA_LEN && + !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len, + DMA_FROM_DEVICE)) + return; + + if (chip->options & NAND_BUSWIDTH_16) + ioread16_rep(nand->activecs->io.virt, buf, len / 2); + else + ioread8_rep(nand->activecs->io.virt, buf, len); +} + +static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + + nc = to_nand_controller(chip->controller); + + /* + * If the controller supports DMA, the buffer address is DMA-able and + * len is long enough to make DMA transfers profitable, let's trigger + * a DMA transfer. If it fails, fallback to PIO mode. 
+ */ + if (nc->dmac && virt_addr_valid(buf) && + len >= MIN_DMA_LEN && + !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma, + len, DMA_TO_DEVICE)) + return; + + if (chip->options & NAND_BUSWIDTH_16) + iowrite16_rep(nand->activecs->io.virt, buf, len / 2); + else + iowrite8_rep(nand->activecs->io.virt, buf, len); +} + +static int atmel_nand_dev_ready(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + + return gpiod_get_value(nand->activecs->rb.gpio); +} + +static void atmel_nand_select_chip(struct mtd_info *mtd, int cs) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + + if (cs < 0 || cs >= nand->numcs) { + nand->activecs = NULL; + chip->dev_ready = NULL; + return; + } + + nand->activecs = &nand->cs[cs]; + + if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB) + chip->dev_ready = atmel_nand_dev_ready; +} + +static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_hsmc_nand_controller *nc; + u32 status; + + nc = to_hsmc_nand_controller(chip->controller); + + regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status); + + return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id); +} + +static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_hsmc_nand_controller *nc; + + nc = to_hsmc_nand_controller(chip->controller); + + atmel_nand_select_chip(mtd, cs); + + if (!nand->activecs) { + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL, + ATMEL_HSMC_NFC_CTRL_DIS); + return; + } + + if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB) + chip->dev_ready = atmel_hsmc_nand_dev_ready; + + regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG, + ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK | + ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK | + ATMEL_HSMC_NFC_CFG_RSPARE | + ATMEL_HSMC_NFC_CFG_WSPARE, + ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) | + ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) | + ATMEL_HSMC_NFC_CFG_RSPARE); + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL, + ATMEL_HSMC_NFC_CTRL_EN); +} + +static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll) +{ + u8 *addrs = nc->op.addrs; + unsigned int op = 0; + u32 addr, val; + int i, ret; + + nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE; + + for (i = 0; i < nc->op.ncmds; i++) + op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]); + + if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES) + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++); + + op |= ATMEL_NFC_CSID(nc->op.cs) | + ATMEL_NFC_ACYCLE(nc->op.naddrs); + + if (nc->op.ncmds > 1) + op |= ATMEL_NFC_VCMD2; + + addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) | + (addrs[3] << 24); + + if (nc->op.data != ATMEL_NFC_NO_DATA) { + op |= ATMEL_NFC_DATAEN; + nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE; + + if (nc->op.data == ATMEL_NFC_WRITE_DATA) + op |= ATMEL_NFC_NFCWR; + } + + /* Clear all flags. */ + regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val); + + /* Send the command. */ + regmap_write(nc->io, op, addr); + + ret = atmel_nfc_wait(nc, poll, 0); + if (ret) + dev_err(nc->base.dev, + "Failed to send NAND command (err = %d)!", + ret); + + /* Reset the op state. 
*/ + memset(&nc->op, 0, sizeof(nc->op)); + + return ret; +} + +static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat, + unsigned int ctrl) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_hsmc_nand_controller *nc; + + nc = to_hsmc_nand_controller(chip->controller); + + if (ctrl & NAND_ALE) { + if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES) + return; + + nc->op.addrs[nc->op.naddrs++] = dat; + } else if (ctrl & NAND_CLE) { + if (nc->op.ncmds > 1) + return; + + nc->op.cmds[nc->op.ncmds++] = dat; + } + + if (dat == NAND_CMD_NONE) { + nc->op.cs = nand->activecs->id; + atmel_nfc_exec_op(nc, true); + } +} + +static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, + unsigned int ctrl) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + + nc = to_nand_controller(chip->controller); + + if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) { + if (ctrl & NAND_NCE) + gpiod_set_value(nand->activecs->csgpio, 0); + else + gpiod_set_value(nand->activecs->csgpio, 1); + } + + if (ctrl & NAND_ALE) + writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs); + else if (ctrl & NAND_CLE) + writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs); +} + +static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf, + bool oob_required) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_hsmc_nand_controller *nc; + int ret = -EIO; + + nc = to_hsmc_nand_controller(chip->controller); + + if (nc->base.dmac) + ret = atmel_nand_dma_transfer(&nc->base, (void *)buf, + nc->sram.dma, mtd->writesize, + DMA_TO_DEVICE); + + /* Falling back to CPU copy. */ + if (ret) + memcpy_toio(nc->sram.virt, buf, mtd->writesize); + + if (oob_required) + memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi, + mtd->oobsize); +} + +static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf, + bool oob_required) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_hsmc_nand_controller *nc; + int ret = -EIO; + + nc = to_hsmc_nand_controller(chip->controller); + + if (nc->base.dmac) + ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma, + mtd->writesize, DMA_FROM_DEVICE); + + /* Falling back to CPU copy. */ + if (ret) + memcpy_fromio(buf, nc->sram.virt, mtd->writesize); + + if (oob_required) + memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize, + mtd->oobsize); +} + +static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_hsmc_nand_controller *nc; + + nc = to_hsmc_nand_controller(chip->controller); + + if (column >= 0) { + nc->op.addrs[nc->op.naddrs++] = column; + + /* + * 2 address cycles for the column offset on large page NANDs. 
+ */ + if (mtd->writesize > 512) + nc->op.addrs[nc->op.naddrs++] = column >> 8; + } + + if (page >= 0) { + nc->op.addrs[nc->op.naddrs++] = page; + nc->op.addrs[nc->op.naddrs++] = page >> 8; + + if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || + (mtd->writesize <= 512 && chip->chipsize > SZ_32M)) + nc->op.addrs[nc->op.naddrs++] = page >> 16; + } +} + +static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw) +{ + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + int ret; + + nc = to_nand_controller(chip->controller); + + if (raw) + return 0; + + ret = atmel_pmecc_enable(nand->pmecc, op); + if (ret) + dev_err(nc->dev, + "Failed to enable ECC engine (err = %d)\n", ret); + + return ret; +} + +static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw) +{ + struct atmel_nand *nand = to_atmel_nand(chip); + + if (!raw) + atmel_pmecc_disable(nand->pmecc); +} + +static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw) +{ + struct atmel_nand *nand = to_atmel_nand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand_controller *nc; + struct mtd_oob_region oobregion; + void *eccbuf; + int ret, i; + + nc = to_nand_controller(chip->controller); + + if (raw) + return 0; + + ret = atmel_pmecc_wait_rdy(nand->pmecc); + if (ret) { + dev_err(nc->dev, + "Failed to transfer NAND page data (err = %d)\n", + ret); + return ret; + } + + mtd_ooblayout_ecc(mtd, 0, &oobregion); + eccbuf = chip->oob_poi + oobregion.offset; + + for (i = 0; i < chip->ecc.steps; i++) { + atmel_pmecc_get_generated_eccbytes(nand->pmecc, i, + eccbuf); + eccbuf += chip->ecc.bytes; + } + + return 0; +} + +static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf, + bool raw) +{ + struct atmel_nand *nand = to_atmel_nand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand_controller *nc; + struct mtd_oob_region oobregion; + int ret, i, max_bitflips = 0; + void *databuf, *eccbuf; + + nc = to_nand_controller(chip->controller); + + if (raw) + return 0; + + ret = atmel_pmecc_wait_rdy(nand->pmecc); + if (ret) { + dev_err(nc->dev, + "Failed to read NAND page data (err = %d)\n", + ret); + return ret; + } + + mtd_ooblayout_ecc(mtd, 0, &oobregion); + eccbuf = chip->oob_poi + oobregion.offset; + databuf = buf; + + for (i = 0; i < chip->ecc.steps; i++) { + ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf, + eccbuf); + if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc)) + ret = nand_check_erased_ecc_chunk(databuf, + chip->ecc.size, + eccbuf, + chip->ecc.bytes, + NULL, 0, + chip->ecc.strength); + + if (ret >= 0) + max_bitflips = max(ret, max_bitflips); + else + mtd->ecc_stats.failed++; + + databuf += chip->ecc.size; + eccbuf += chip->ecc.bytes; + } + + return max_bitflips; +} + +static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf, + bool oob_required, int page, bool raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand *nand = to_atmel_nand(chip); + int ret; + + ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw); + if (ret) + return ret; + + atmel_nand_write_buf(mtd, buf, mtd->writesize); + + ret = atmel_nand_pmecc_generate_eccbytes(chip, raw); + if (ret) { + atmel_pmecc_disable(nand->pmecc); + return ret; + } + + atmel_nand_pmecc_disable(chip, raw); + + atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + +static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, + struct nand_chip *chip, const u8 *buf, + int 
oob_required, int page) +{ + return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false); +} + +static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true); +} + +static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf, + bool oob_required, int page, bool raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw); + if (ret) + return ret; + + atmel_nand_read_buf(mtd, buf, mtd->writesize); + atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); + + ret = atmel_nand_pmecc_correct_data(chip, buf, raw); + + atmel_nand_pmecc_disable(chip, raw); + + return ret; +} + +static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, + struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false); +} + +static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true); +} + +static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip, + const u8 *buf, bool oob_required, + int page, bool raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_hsmc_nand_controller *nc; + int ret; + + nc = to_hsmc_nand_controller(chip->controller); + + atmel_nfc_copy_to_sram(chip, buf, false); + + nc->op.cmds[0] = NAND_CMD_SEQIN; + nc->op.ncmds = 1; + atmel_nfc_set_op_addr(chip, page, 0x0); + nc->op.cs = nand->activecs->id; + nc->op.data = ATMEL_NFC_WRITE_DATA; + + ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw); + if (ret) + return ret; + + ret = atmel_nfc_exec_op(nc, false); + if (ret) { + atmel_nand_pmecc_disable(chip, raw); + dev_err(nc->base.dev, + "Failed to transfer NAND page data (err = %d)\n", + ret); + return ret; + } + + ret = atmel_nand_pmecc_generate_eccbytes(chip, raw); + + atmel_nand_pmecc_disable(chip, raw); + + if (ret) + return ret; + + atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); + + nc->op.cmds[0] = NAND_CMD_PAGEPROG; + nc->op.ncmds = 1; + nc->op.cs = nand->activecs->id; + ret = atmel_nfc_exec_op(nc, false); + if (ret) + dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n", + ret); + + return ret; +} + +static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page, + false); +} + +static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, + int oob_required, int page) +{ + return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page, + true); +} + +static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf, + bool oob_required, int page, + bool raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_hsmc_nand_controller *nc; + int ret; + + nc = to_hsmc_nand_controller(chip->controller); + + /* + * Optimized read page accessors only work when the NAND R/B pin is + * connected to a native SoC R/B pin. If that's not the case, fallback + * to the non-optimized one. 
+ */ + if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) { + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + + return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, + raw); + } + + nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0; + + if (mtd->writesize > 512) + nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART; + + atmel_nfc_set_op_addr(chip, page, 0x0); + nc->op.cs = nand->activecs->id; + nc->op.data = ATMEL_NFC_READ_DATA; + + ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw); + if (ret) + return ret; + + ret = atmel_nfc_exec_op(nc, false); + if (ret) { + atmel_nand_pmecc_disable(chip, raw); + dev_err(nc->base.dev, + "Failed to load NAND page data (err = %d)\n", + ret); + return ret; + } + + atmel_nfc_copy_from_sram(chip, buf, true); + + ret = atmel_nand_pmecc_correct_data(chip, buf, raw); + + atmel_nand_pmecc_disable(chip, raw); + + return ret; +} + +static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd, + struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page, + false); +} + +static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, + u8 *buf, int oob_required, + int page) +{ + return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page, + true); +} + +static int atmel_nand_pmecc_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + struct atmel_pmecc_user_req req; + + nc = to_nand_controller(chip->controller); + + if (!nc->pmecc) { + dev_err(nc->dev, "HW ECC not supported\n"); + return -ENOTSUPP; + } + + if (nc->caps->legacy_of_bindings) { + u32 val; + + if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap", + &val)) + chip->ecc.strength = val; + + if (!of_property_read_u32(nc->dev->of_node, + "atmel,pmecc-sector-size", + &val)) + chip->ecc.size = val; + } + + if (chip->ecc.options & NAND_ECC_MAXIMIZE) + req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH; + else if (chip->ecc.strength) + req.ecc.strength = chip->ecc.strength; + else if (chip->ecc_strength_ds) + req.ecc.strength = chip->ecc_strength_ds; + else + req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH; + + if (chip->ecc.size) + req.ecc.sectorsize = chip->ecc.size; + else if (chip->ecc_step_ds) + req.ecc.sectorsize = chip->ecc_step_ds; + else + req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO; + + req.pagesize = mtd->writesize; + req.oobsize = mtd->oobsize; + + if (mtd->writesize <= 512) { + req.ecc.bytes = 4; + req.ecc.ooboffset = 0; + } else { + req.ecc.bytes = mtd->oobsize - 2; + req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO; + } + + nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req); + if (IS_ERR(nand->pmecc)) + return PTR_ERR(nand->pmecc); + + chip->ecc.algo = NAND_ECC_BCH; + chip->ecc.size = req.ecc.sectorsize; + chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors; + chip->ecc.strength = req.ecc.strength; + + chip->options |= NAND_NO_SUBPAGE_WRITE; + + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + + return 0; +} + +static int atmel_nand_ecc_init(struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct atmel_nand_controller *nc; + int ret; + + nc = to_nand_controller(chip->controller); + + switch (chip->ecc.mode) { + case NAND_ECC_NONE: + case NAND_ECC_SOFT: + /* + * Nothing to do, the core will initialize everything for us. 
+ */ + break; + + case NAND_ECC_HW: + ret = atmel_nand_pmecc_init(chip); + if (ret) + return ret; + + chip->ecc.read_page = atmel_nand_pmecc_read_page; + chip->ecc.write_page = atmel_nand_pmecc_write_page; + chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw; + chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw; + break; + + default: + /* Other modes are not supported. */ + dev_err(nc->dev, "Unsupported ECC mode: %d\n", + chip->ecc.mode); + return -ENOTSUPP; + } + + return 0; +} + +static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + int ret; + + ret = atmel_nand_ecc_init(nand); + if (ret) + return ret; + + if (chip->ecc.mode != NAND_ECC_HW) + return 0; + + /* Adjust the ECC operations for the HSMC IP. */ + chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page; + chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page; + chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw; + chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw; + chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; + + return 0; +} + +static void atmel_nand_init(struct atmel_nand_controller *nc, + struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct mtd_info *mtd = nand_to_mtd(chip); + + mtd->dev.parent = nc->dev; + nand->base.controller = &nc->base; + + chip->cmd_ctrl = atmel_nand_cmd_ctrl; + chip->read_byte = atmel_nand_read_byte; + chip->read_word = atmel_nand_read_word; + chip->write_byte = atmel_nand_write_byte; + chip->read_buf = atmel_nand_read_buf; + chip->write_buf = atmel_nand_write_buf; + chip->select_chip = atmel_nand_select_chip; + + /* Some NANDs require a longer delay than the default one (20us). */ + chip->chip_delay = 40; + + /* + * Use a bounce buffer when the buffer passed by the MTD user is not + * suitable for DMA. + */ + if (nc->dmac) + chip->options |= NAND_USE_BOUNCE_BUFFER; + + /* Default to HW ECC if pmecc is available. */ + if (nc->pmecc) + chip->ecc.mode = NAND_ECC_HW; +} + +static void atmel_smc_nand_init(struct atmel_nand_controller *nc, + struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct atmel_smc_nand_controller *smc_nc; + int i; + + atmel_nand_init(nc, nand); + + smc_nc = to_smc_nand_controller(chip->controller); + if (!smc_nc->matrix) + return; + + /* Attach the CS to the NAND Flash logic. */ + for (i = 0; i < nand->numcs; i++) + regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs, + BIT(nand->cs[i].id), BIT(nand->cs[i].id)); +} + +static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc, + struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + + atmel_nand_init(nc, nand); + + /* Overload some methods for the HSMC controller. 
*/ + chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl; + chip->select_chip = atmel_hsmc_nand_select_chip; +} + +static int atmel_nand_detect(struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand_controller *nc; + int ret; + + nc = to_nand_controller(chip->controller); + + ret = nand_scan_ident(mtd, nand->numcs, NULL); + if (ret) + dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret); + + return ret; +} + +static int atmel_nand_unregister(struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + ret = mtd_device_unregister(mtd); + if (ret) + return ret; + + nand_cleanup(chip); + list_del(&nand->node); + + return 0; +} + +static int atmel_nand_register(struct atmel_nand *nand) +{ + struct nand_chip *chip = &nand->base; + struct mtd_info *mtd = nand_to_mtd(chip); + struct atmel_nand_controller *nc; + int ret; + + nc = to_nand_controller(chip->controller); + + if (nc->caps->legacy_of_bindings || !nc->dev->of_node) { + /* + * We keep the MTD name unchanged to avoid breaking platforms + * where the MTD cmdline parser is used and the bootloader + * has not been updated to use the new naming scheme. + */ + mtd->name = "atmel_nand"; + } else if (!mtd->name) { + /* + * If the new bindings are used and the bootloader has not been + * updated to pass a new mtdparts parameter on the cmdline, you + * should define the following property in your nand node: + * + * label = "atmel_nand"; + * + * This way, mtd->name will be set by the core when + * nand_set_flash_node() is called. + */ + mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL, + "%s:nand.%d", dev_name(nc->dev), + nand->cs[0].id); + if (!mtd->name) { + dev_err(nc->dev, "Failed to allocate mtd->name\n"); + return -ENOMEM; + } + } + + ret = nand_scan_tail(mtd); + if (ret) { + dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret); + return ret; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(nc->dev, "Failed to register mtd device: %d\n", ret); + nand_cleanup(chip); + return ret; + } + + list_add_tail(&nand->node, &nc->chips); + + return 0; +} + +static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc, + struct device_node *np, + int reg_cells) +{ + struct atmel_nand *nand; + struct gpio_desc *gpio; + int numcs, ret, i; + + numcs = of_property_count_elems_of_size(np, "reg", + reg_cells * sizeof(u32)); + if (numcs < 1) { + dev_err(nc->dev, "Missing or invalid reg property\n"); + return ERR_PTR(-EINVAL); + } + + nand = devm_kzalloc(nc->dev, + sizeof(*nand) + (numcs * sizeof(*nand->cs)), + GFP_KERNEL); + if (!nand) { + dev_err(nc->dev, "Failed to allocate NAND object\n"); + return ERR_PTR(-ENOMEM); + } + + nand->numcs = numcs; + + gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0, + &np->fwnode, GPIOD_IN, + "nand-det"); + if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { + dev_err(nc->dev, + "Failed to get detect gpio (err = %ld)\n", + PTR_ERR(gpio)); + return ERR_CAST(gpio); + } + + if (!IS_ERR(gpio)) + nand->cdgpio = gpio; + + for (i = 0; i < numcs; i++) { + struct resource res; + u32 val; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(nc->dev, "Invalid reg property (err = %d)\n", + ret); + return ERR_PTR(ret); + } + + ret = of_property_read_u32_index(np, "reg", i * reg_cells, + &val); + if (ret) { + dev_err(nc->dev, "Invalid reg property (err = %d)\n", + ret); + return ERR_PTR(ret); + } + + nand->cs[i].id = val; + + 
nand->cs[i].io.dma = res.start; + nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res); + if (IS_ERR(nand->cs[i].io.virt)) + return ERR_CAST(nand->cs[i].io.virt); + + if (!of_property_read_u32(np, "atmel,rb", &val)) { + if (val > ATMEL_NFC_MAX_RB_ID) + return ERR_PTR(-EINVAL); + + nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB; + nand->cs[i].rb.id = val; + } else { + gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, + "rb", i, &np->fwnode, + GPIOD_IN, "nand-rb"); + if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { + dev_err(nc->dev, + "Failed to get R/B gpio (err = %ld)\n", + PTR_ERR(gpio)); + return ERR_CAST(gpio); + } + + if (!IS_ERR(gpio)) { + nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB; + nand->cs[i].rb.gpio = gpio; + } + } + + gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs", + i, &np->fwnode, + GPIOD_OUT_HIGH, + "nand-cs"); + if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { + dev_err(nc->dev, + "Failed to get CS gpio (err = %ld)\n", + PTR_ERR(gpio)); + return ERR_CAST(gpio); + } + + if (!IS_ERR(gpio)) + nand->cs[i].csgpio = gpio; + } + + nand_set_flash_node(&nand->base, np); + + return nand; +} + +static int +atmel_nand_controller_add_nand(struct atmel_nand_controller *nc, + struct atmel_nand *nand) +{ + int ret; + + /* No card inserted, skip this NAND. */ + if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) { + dev_info(nc->dev, "No SmartMedia card inserted.\n"); + return 0; + } + + nc->caps->ops->nand_init(nc, nand); + + ret = atmel_nand_detect(nand); + if (ret) + return ret; + + ret = nc->caps->ops->ecc_init(nand); + if (ret) + return ret; + + return atmel_nand_register(nand); +} + +static int +atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc) +{ + struct atmel_nand *nand, *tmp; + int ret; + + list_for_each_entry_safe(nand, tmp, &nc->chips, node) { + ret = atmel_nand_unregister(nand); + if (ret) + return ret; + } + + return 0; +} + +static int +atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc) +{ + struct device *dev = nc->dev; + struct platform_device *pdev = to_platform_device(dev); + struct atmel_nand *nand; + struct gpio_desc *gpio; + struct resource *res; + + /* + * Legacy bindings only allow connecting a single NAND with a unique CS + * line to the controller. + */ + nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs), + GFP_KERNEL); + if (!nand) + return -ENOMEM; + + nand->numcs = 1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nand->cs[0].io.virt = devm_ioremap_resource(dev, res); + if (IS_ERR(nand->cs[0].io.virt)) + return PTR_ERR(nand->cs[0].io.virt); + + nand->cs[0].io.dma = res->start; + + /* + * The old driver was hardcoding the CS id to 3 for all sama5 + * controllers. Since this id is only meaningful for the sama5 + * controller we can safely assign this id to 3 no matter the + * controller. + * If one wants to connect a NAND to a different CS line, he will + * have to use the new bindings. + */ + nand->cs[0].id = 3; + + /* R/B GPIO. */ + gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN); + if (IS_ERR(gpio)) { + dev_err(dev, "Failed to get R/B gpio (err = %ld)\n", + PTR_ERR(gpio)); + return PTR_ERR(gpio); + } + + if (gpio) { + nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB; + nand->cs[0].rb.gpio = gpio; + } + + /* CS GPIO. */ + gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) { + dev_err(dev, "Failed to get CS gpio (err = %ld)\n", + PTR_ERR(gpio)); + return PTR_ERR(gpio); + } + + nand->cs[0].csgpio = gpio; + + /* Card detect GPIO. 
*/ + gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN); + if (IS_ERR(gpio)) { + dev_err(dev, + "Failed to get detect gpio (err = %ld)\n", + PTR_ERR(gpio)); + return PTR_ERR(gpio); + } + + nand->cdgpio = gpio; + + nand_set_flash_node(&nand->base, nc->dev->of_node); + + return atmel_nand_controller_add_nand(nc, nand); +} + +static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) +{ + struct device_node *np, *nand_np; + struct device *dev = nc->dev; + int ret, reg_cells; + u32 val; + + /* We do not retrieve the SMC syscon when parsing old DTs. */ + if (nc->caps->legacy_of_bindings) + return atmel_nand_controller_legacy_add_nands(nc); + + np = dev->of_node; + + ret = of_property_read_u32(np, "#address-cells", &val); + if (ret) { + dev_err(dev, "missing #address-cells property\n"); + return ret; + } + + reg_cells = val; + + ret = of_property_read_u32(np, "#size-cells", &val); + if (ret) { + dev_err(dev, "missing #address-cells property\n"); + return ret; + } + + reg_cells += val; + + for_each_child_of_node(np, nand_np) { + struct atmel_nand *nand; + + nand = atmel_nand_create(nc, nand_np, reg_cells); + if (IS_ERR(nand)) { + ret = PTR_ERR(nand); + goto err; + } + + ret = atmel_nand_controller_add_nand(nc, nand); + if (ret) + goto err; + } + + return 0; + +err: + atmel_nand_controller_remove_nands(nc); + + return ret; +} + +static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc) +{ + if (nc->dmac) + dma_release_channel(nc->dmac); + + clk_put(nc->mck); +} + +static const struct of_device_id atmel_matrix_of_ids[] = { + { + .compatible = "atmel,at91sam9260-matrix", + .data = (void *)AT91SAM9260_MATRIX_EBICSA, + }, + { + .compatible = "atmel,at91sam9261-matrix", + .data = (void *)AT91SAM9261_MATRIX_EBICSA, + }, + { + .compatible = "atmel,at91sam9263-matrix", + .data = (void *)AT91SAM9263_MATRIX_EBI0CSA, + }, + { + .compatible = "atmel,at91sam9rl-matrix", + .data = (void *)AT91SAM9RL_MATRIX_EBICSA, + }, + { + .compatible = "atmel,at91sam9g45-matrix", + .data = (void *)AT91SAM9G45_MATRIX_EBICSA, + }, + { + .compatible = "atmel,at91sam9n12-matrix", + .data = (void *)AT91SAM9N12_MATRIX_EBICSA, + }, + { + .compatible = "atmel,at91sam9x5-matrix", + .data = (void *)AT91SAM9X5_MATRIX_EBICSA, + }, + { /* sentinel */ }, +}; + +static int atmel_nand_controller_init(struct atmel_nand_controller *nc, + struct platform_device *pdev, + const struct atmel_nand_controller_caps *caps) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + int ret; + + nand_hw_control_init(&nc->base); + INIT_LIST_HEAD(&nc->chips); + nc->dev = dev; + nc->caps = caps; + + platform_set_drvdata(pdev, nc); + + nc->pmecc = devm_atmel_pmecc_get(dev); + if (IS_ERR(nc->pmecc)) { + ret = PTR_ERR(nc->pmecc); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Could not get PMECC object (err = %d)\n", + ret); + return ret; + } + + if (nc->caps->has_dma) { + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + + nc->dmac = dma_request_channel(mask, NULL, NULL); + if (!nc->dmac) + dev_err(nc->dev, "Failed to request DMA channel\n"); + } + + /* We do not retrieve the SMC syscon when parsing old DTs. 
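The reg_cells arithmetic in atmel_nand_controller_add_nands() above is easy to misread: the controller node's #address-cells and #size-cells are summed to get the number of u32 cells per "reg" entry of a NAND child, and only the first cell of each entry is used as the CS id. A worked illustration with assumed cell counts (the actual values come from the device tree, not from the driver):

/*
 * Illustration only, assuming #address-cells = 2 and #size-cells = 1 on
 * the controller node, i.e. reg_cells = 3. A child with
 *
 *   reg = <0x3 0x0 0x800000>;
 *
 * counts as numcs = 1 with cs[0].id = 3, while
 *
 *   reg = <0x2 0x0 0x800000>, <0x3 0x0 0x800000>;
 *
 * gives numcs = 2 with cs[0].id = 2 and cs[1].id = 3. This is what
 * of_property_count_elems_of_size(np, "reg", reg_cells * sizeof(u32)) and
 * of_property_read_u32_index(np, "reg", i * reg_cells, &val) compute in
 * atmel_nand_create().
 */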
*/ + if (nc->caps->legacy_of_bindings) + return 0; + + np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); + if (!np) { + dev_err(dev, "Missing or invalid atmel,smc property\n"); + return -EINVAL; + } + + nc->smc = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(nc->smc)) { + ret = PTR_ERR(nc->smc); + dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret); + return ret; + } + + return 0; +} + +static int +atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc) +{ + struct device *dev = nc->base.dev; + const struct of_device_id *match; + struct device_node *np; + int ret; + + /* We do not retrieve the matrix syscon when parsing old DTs. */ + if (nc->base.caps->legacy_of_bindings) + return 0; + + np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0); + if (!np) + return 0; + + match = of_match_node(atmel_matrix_of_ids, np); + if (!match) { + of_node_put(np); + return 0; + } + + nc->matrix = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(nc->matrix)) { + ret = PTR_ERR(nc->matrix); + dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret); + return ret; + } + + nc->ebi_csa_offs = (unsigned int)match->data; + + /* + * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1 + * add 4 to ->ebi_csa_offs. + */ + if (of_device_is_compatible(dev->parent->of_node, + "atmel,at91sam9263-ebi1")) + nc->ebi_csa_offs += 4; + + return 0; +} + +static int +atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc) +{ + struct regmap_config regmap_conf = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + }; + + struct device *dev = nc->base.dev; + struct device_node *nand_np, *nfc_np; + void __iomem *iomem; + struct resource res; + int ret; + + nand_np = dev->of_node; + nfc_np = of_find_compatible_node(dev->of_node, NULL, + "atmel,sama5d3-nfc"); + + nc->clk = of_clk_get(nfc_np, 0); + if (IS_ERR(nc->clk)) { + ret = PTR_ERR(nc->clk); + dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n", + ret); + goto out; + } + + ret = clk_prepare_enable(nc->clk); + if (ret) { + dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n", + ret); + goto out; + } + + nc->irq = of_irq_get(nand_np, 0); + if (nc->irq < 0) { + ret = nc->irq; + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get IRQ number (err = %d)\n", + ret); + goto out; + } + + ret = of_address_to_resource(nfc_np, 0, &res); + if (ret) { + dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n", + ret); + goto out; + } + + iomem = devm_ioremap_resource(dev, &res); + if (IS_ERR(iomem)) { + ret = PTR_ERR(iomem); + goto out; + } + + regmap_conf.name = "nfc-io"; + regmap_conf.max_register = resource_size(&res) - 4; + nc->io = devm_regmap_init_mmio(dev, iomem, ®map_conf); + if (IS_ERR(nc->io)) { + ret = PTR_ERR(nc->io); + dev_err(dev, "Could not create NFC IO regmap (err = %d)\n", + ret); + goto out; + } + + ret = of_address_to_resource(nfc_np, 1, &res); + if (ret) { + dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n", + ret); + goto out; + } + + iomem = devm_ioremap_resource(dev, &res); + if (IS_ERR(iomem)) { + ret = PTR_ERR(iomem); + goto out; + } + + regmap_conf.name = "smc"; + regmap_conf.max_register = resource_size(&res) - 4; + nc->base.smc = devm_regmap_init_mmio(dev, iomem, ®map_conf); + if (IS_ERR(nc->base.smc)) { + ret = PTR_ERR(nc->base.smc); + dev_err(dev, "Could not create NFC IO regmap (err = %d)\n", + ret); + goto out; + } + + ret = of_address_to_resource(nfc_np, 2, &res); + if (ret) { + dev_err(dev, "Invalid or missing SRAM 
resource (err = %d)\n", + ret); + goto out; + } + + nc->sram.virt = devm_ioremap_resource(dev, &res); + if (IS_ERR(nc->sram.virt)) { + ret = PTR_ERR(nc->sram.virt); + goto out; + } + + nc->sram.dma = res.start; + +out: + of_node_put(nfc_np); + + return ret; +} + +static int +atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc) +{ + struct device *dev = nc->base.dev; + struct device_node *np; + int ret; + + np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); + if (!np) { + dev_err(dev, "Missing or invalid atmel,smc property\n"); + return -EINVAL; + } + + nc->irq = of_irq_get(np, 0); + of_node_put(np); + if (nc->irq < 0) { + if (nc->irq != -EPROBE_DEFER) + dev_err(dev, "Failed to get IRQ number (err = %d)\n", + nc->irq); + return nc->irq; + } + + np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0); + if (!np) { + dev_err(dev, "Missing or invalid atmel,nfc-io property\n"); + return -EINVAL; + } + + nc->io = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(nc->io)) { + ret = PTR_ERR(nc->io); + dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret); + return ret; + } + + nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node, + "atmel,nfc-sram", 0); + if (!nc->sram.pool) { + dev_err(nc->base.dev, "Missing SRAM\n"); + return -ENOMEM; + } + + nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool, + ATMEL_NFC_SRAM_SIZE, + &nc->sram.dma); + if (!nc->sram.virt) { + dev_err(nc->base.dev, + "Could not allocate memory from the NFC SRAM pool\n"); + return -ENOMEM; + } + + return 0; +} + +static int +atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc) +{ + struct atmel_hsmc_nand_controller *hsmc_nc; + int ret; + + ret = atmel_nand_controller_remove_nands(nc); + if (ret) + return ret; + + hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base); + if (hsmc_nc->sram.pool) + gen_pool_free(hsmc_nc->sram.pool, + (unsigned long)hsmc_nc->sram.virt, + ATMEL_NFC_SRAM_SIZE); + + if (hsmc_nc->clk) { + clk_disable_unprepare(hsmc_nc->clk); + clk_put(hsmc_nc->clk); + } + + atmel_nand_controller_cleanup(nc); + + return 0; +} + +static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev, + const struct atmel_nand_controller_caps *caps) +{ + struct device *dev = &pdev->dev; + struct atmel_hsmc_nand_controller *nc; + int ret; + + nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL); + if (!nc) + return -ENOMEM; + + ret = atmel_nand_controller_init(&nc->base, pdev, caps); + if (ret) + return ret; + + if (caps->legacy_of_bindings) + ret = atmel_hsmc_nand_controller_legacy_init(nc); + else + ret = atmel_hsmc_nand_controller_init(nc); + + if (ret) + return ret; + + /* Make sure all irqs are masked before registering our IRQ handler. */ + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff); + ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt, + IRQF_SHARED, "nfc", nc); + if (ret) { + dev_err(dev, + "Could not get register NFC interrupt handler (err = %d)\n", + ret); + goto err; + } + + /* Initial NFC configuration. 
*/ + regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG, + ATMEL_HSMC_NFC_CFG_DTO_MAX); + + ret = atmel_nand_controller_add_nands(&nc->base); + if (ret) + goto err; + + return 0; + +err: + atmel_hsmc_nand_controller_remove(&nc->base); + + return ret; +} + +static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { + .probe = atmel_hsmc_nand_controller_probe, + .remove = atmel_hsmc_nand_controller_remove, + .ecc_init = atmel_hsmc_nand_ecc_init, + .nand_init = atmel_hsmc_nand_init, +}; + +static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { + .has_dma = true, + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_hsmc_nc_ops, +}; + +/* Only used to parse old bindings. */ +static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = { + .has_dma = true, + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_hsmc_nc_ops, + .legacy_of_bindings = true, +}; + +static int atmel_smc_nand_controller_probe(struct platform_device *pdev, + const struct atmel_nand_controller_caps *caps) +{ + struct device *dev = &pdev->dev; + struct atmel_smc_nand_controller *nc; + int ret; + + nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL); + if (!nc) + return -ENOMEM; + + ret = atmel_nand_controller_init(&nc->base, pdev, caps); + if (ret) + return ret; + + ret = atmel_smc_nand_controller_init(nc); + if (ret) + return ret; + + return atmel_nand_controller_add_nands(&nc->base); +} + +static int +atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) +{ + int ret; + + ret = atmel_nand_controller_remove_nands(nc); + if (ret) + return ret; + + atmel_nand_controller_cleanup(nc); + + return 0; +} + +static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { + .probe = atmel_smc_nand_controller_probe, + .remove = atmel_smc_nand_controller_remove, + .ecc_init = atmel_nand_ecc_init, + .nand_init = atmel_smc_nand_init, +}; + +static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = { + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_smc_nc_ops, +}; + +static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = { + .ale_offs = BIT(22), + .cle_offs = BIT(21), + .ops = &atmel_smc_nc_ops, +}; + +static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = { + .has_dma = true, + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_smc_nc_ops, +}; + +/* Only used to parse old bindings. 
*/ +static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = { + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_smc_nc_ops, + .legacy_of_bindings = true, +}; + +static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = { + .ale_offs = BIT(22), + .cle_offs = BIT(21), + .ops = &atmel_smc_nc_ops, + .legacy_of_bindings = true, +}; + +static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = { + .has_dma = true, + .ale_offs = BIT(21), + .cle_offs = BIT(22), + .ops = &atmel_smc_nc_ops, + .legacy_of_bindings = true, +}; + +static const struct of_device_id atmel_nand_controller_of_ids[] = { + { + .compatible = "atmel,at91rm9200-nand-controller", + .data = &atmel_rm9200_nc_caps, + }, + { + .compatible = "atmel,at91sam9260-nand-controller", + .data = &atmel_rm9200_nc_caps, + }, + { + .compatible = "atmel,at91sam9261-nand-controller", + .data = &atmel_sam9261_nc_caps, + }, + { + .compatible = "atmel,at91sam9g45-nand-controller", + .data = &atmel_sam9g45_nc_caps, + }, + { + .compatible = "atmel,sama5d3-nand-controller", + .data = &atmel_sama5_nc_caps, + }, + /* Support for old/deprecated bindings: */ + { + .compatible = "atmel,at91rm9200-nand", + .data = &atmel_rm9200_nand_caps, + }, + { + .compatible = "atmel,sama5d4-nand", + .data = &atmel_rm9200_nand_caps, + }, + { + .compatible = "atmel,sama5d2-nand", + .data = &atmel_rm9200_nand_caps, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids); + +static int atmel_nand_controller_probe(struct platform_device *pdev) +{ + const struct atmel_nand_controller_caps *caps; + + if (pdev->id_entry) + caps = (void *)pdev->id_entry->driver_data; + else + caps = of_device_get_match_data(&pdev->dev); + + if (!caps) { + dev_err(&pdev->dev, "Could not retrieve NFC caps\n"); + return -EINVAL; + } + + if (caps->legacy_of_bindings) { + u32 ale_offs = 21; + + /* + * If we are parsing legacy DT props and the DT contains a + * valid NFC node, forward the request to the sama5 logic. + */ + if (of_find_compatible_node(pdev->dev.of_node, NULL, + "atmel,sama5d3-nfc")) + caps = &atmel_sama5_nand_caps; + + /* + * Even if the compatible says we are dealing with an + * at91rm9200 controller, the atmel,nand-has-dma specify that + * this controller supports DMA, which means we are in fact + * dealing with an at91sam9g45+ controller. + */ + if (!caps->has_dma && + of_property_read_bool(pdev->dev.of_node, + "atmel,nand-has-dma")) + caps = &atmel_sam9g45_nand_caps; + + /* + * All SoCs except the at91sam9261 are assigning ALE to A21 and + * CLE to A22. If atmel,nand-addr-offset != 21 this means we're + * actually dealing with an at91sam9261 controller. 
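The ale_offs and cle_offs capabilities used throughout these caps structures are address-line offsets: command and address cycles are issued by writing to the NAND data window with A21 or A22 driven high (swapped on at91sam9261), which is what the atmel,nand-addr-offset heuristic in the surrounding probe code relies on. A minimal sketch of how such offsets are typically consumed by a ->cmd_ctrl() hook, simplified and hypothetical rather than the driver's actual implementation:

#include <linux/io.h>
#include <linux/mtd/nand.h>

/*
 * Sketch only: issue a command or address cycle by letting the decoded
 * address lines (A21/A22 by default, swapped on at91sam9261) drive the
 * CLE and ALE pins of the NAND chip.
 */
static void example_nand_cmd_ctrl(void __iomem *io_base, u32 cle_offs,
				  u32 ale_offs, int dat, unsigned int ctrl)
{
	if (dat == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(dat, io_base + cle_offs);	/* command cycle */
	else if (ctrl & NAND_ALE)
		writeb(dat, io_base + ale_offs);	/* address cycle */
	else
		writeb(dat, io_base);			/* data cycle */
}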
+ */ + of_property_read_u32(pdev->dev.of_node, + "atmel,nand-addr-offset", &ale_offs); + if (ale_offs != 21) + caps = &atmel_sam9261_nand_caps; + } + + return caps->ops->probe(pdev, caps); +} + +static int atmel_nand_controller_remove(struct platform_device *pdev) +{ + struct atmel_nand_controller *nc = platform_get_drvdata(pdev); + + return nc->caps->ops->remove(nc); +} + +static struct platform_driver atmel_nand_controller_driver = { + .driver = { + .name = "atmel-nand-controller", + .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), + }, + .probe = atmel_nand_controller_probe, + .remove = atmel_nand_controller_remove, +}; +module_platform_driver(atmel_nand_controller_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); +MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs"); +MODULE_ALIAS("platform:atmel-nand-controller"); diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c new file mode 100644 index 000000000000..55a8ee5306ea --- /dev/null +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -0,0 +1,1020 @@ +/* + * Copyright 2017 ATMEL + * Copyright 2017 Free Electrons + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * Derived from the atmel_nand.c driver which contained the following + * copyrights: + * + * Copyright 2003 Rick Bronson + * + * Derived from drivers/mtd/nand/autcpu12.c + * Copyright 2001 Thomas Gleixner (gleixner@autronix.de) + * + * Derived from drivers/mtd/spia.c + * Copyright 2000 Steven J. Hill (sjhill@cotw.com) + * + * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 + * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007 + * + * Derived from Das U-Boot source code + * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) + * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas + * + * Add Programmable Multibit ECC support for various AT91 SoC + * Copyright 2012 ATMEL, Hong Xu + * + * Add Nand Flash Controller support for SAMA5 SoC + * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * The PMECC is an hardware assisted BCH engine, which means part of the + * ECC algorithm is left to the software. The hardware/software repartition + * is explained in the "PMECC Controller Functional Description" chapter in + * Atmel datasheets, and some of the functions in this file are directly + * implementing the algorithms described in the "Software Implementation" + * sub-section. + * + * TODO: it seems that the software BCH implementation in lib/bch.c is already + * providing some of the logic we are implementing here. It would be smart + * to expose the needed lib/bch.c helpers/functions and re-use them here. 
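The TODO above concerns the generic BCH library. As a point of reference, here is a rough sketch of what delegating the software half of the job (error location and bit correction) to lib/bch.c could look like. This is an assumption about how such a rework might go, not code from this patch, and it ignores the fact that the PMECC already produces the per-sector remainders in hardware:

#include <linux/bch.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Hypothetical sketch: correct one 512-byte sector of strength t with the
 * generic BCH decoder over GF(2^13), using the same primitive polynomial
 * (0x201b) the PMECC uses for 512-byte sectors.
 */
static int example_bch_correct_sector(u8 *data, unsigned int len,
				      u8 *read_ecc, unsigned int t)
{
	struct bch_control *bch;
	unsigned int *errloc;
	int i, nerrors;

	bch = init_bch(13, t, 0x201b);
	if (!bch)
		return -EINVAL;

	errloc = kmalloc_array(t, sizeof(*errloc), GFP_KERNEL);
	if (!errloc) {
		free_bch(bch);
		return -ENOMEM;
	}

	/* Recompute the syndromes in software and locate the bit errors. */
	nerrors = decode_bch(bch, data, len, read_ecc, NULL, NULL, errloc);
	for (i = 0; i < nerrors; i++) {
		/* Flips located past the data area belong to the ECC bytes. */
		if (errloc[i] < len * 8)
			data[errloc[i] >> 3] ^= BIT(errloc[i] & 7);
	}

	kfree(errloc);
	free_bch(bch);

	/* decode_bch() returns -EBADMSG when the sector is uncorrectable. */
	return nerrors;
}

Such a rework would likely also need to select CONFIG_BCH, the way the existing nand_bch.c soft-ECC path already does.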
+ */ + +#include <linux/genalloc.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/nand.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "pmecc.h" + +/* Galois field dimension */ +#define PMECC_GF_DIMENSION_13 13 +#define PMECC_GF_DIMENSION_14 14 + +/* Primitive Polynomial used by PMECC */ +#define PMECC_GF_13_PRIMITIVE_POLY 0x201b +#define PMECC_GF_14_PRIMITIVE_POLY 0x4443 + +#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000 +#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000 + +/* Time out value for reading PMECC status register */ +#define PMECC_MAX_TIMEOUT_MS 100 + +/* PMECC Register Definitions */ +#define ATMEL_PMECC_CFG 0x0 +#define PMECC_CFG_BCH_STRENGTH(x) (x) +#define PMECC_CFG_BCH_STRENGTH_MASK GENMASK(2, 0) +#define PMECC_CFG_SECTOR512 (0 << 4) +#define PMECC_CFG_SECTOR1024 (1 << 4) +#define PMECC_CFG_NSECTORS(x) ((fls(x) - 1) << 8) +#define PMECC_CFG_READ_OP (0 << 12) +#define PMECC_CFG_WRITE_OP (1 << 12) +#define PMECC_CFG_SPARE_ENABLE BIT(16) +#define PMECC_CFG_AUTO_ENABLE BIT(20) + +#define ATMEL_PMECC_SAREA 0x4 +#define ATMEL_PMECC_SADDR 0x8 +#define ATMEL_PMECC_EADDR 0xc + +#define ATMEL_PMECC_CLK 0x10 +#define PMECC_CLK_133MHZ (2 << 0) + +#define ATMEL_PMECC_CTRL 0x14 +#define PMECC_CTRL_RST BIT(0) +#define PMECC_CTRL_DATA BIT(1) +#define PMECC_CTRL_USER BIT(2) +#define PMECC_CTRL_ENABLE BIT(4) +#define PMECC_CTRL_DISABLE BIT(5) + +#define ATMEL_PMECC_SR 0x18 +#define PMECC_SR_BUSY BIT(0) +#define PMECC_SR_ENABLE BIT(4) + +#define ATMEL_PMECC_IER 0x1c +#define ATMEL_PMECC_IDR 0x20 +#define ATMEL_PMECC_IMR 0x24 +#define ATMEL_PMECC_ISR 0x28 +#define PMECC_ERROR_INT BIT(0) + +#define ATMEL_PMECC_ECC(sector, n) \ + ((((sector) + 1) * 0x40) + (n)) + +#define ATMEL_PMECC_REM(sector, n) \ + ((((sector) + 1) * 0x40) + ((n) * 4) + 0x200) + +/* PMERRLOC Register Definitions */ +#define ATMEL_PMERRLOC_ELCFG 0x0 +#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0) +#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0) +#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16) + +#define ATMEL_PMERRLOC_ELPRIM 0x4 +#define ATMEL_PMERRLOC_ELEN 0x8 +#define ATMEL_PMERRLOC_ELDIS 0xc +#define PMERRLOC_DISABLE BIT(0) + +#define ATMEL_PMERRLOC_ELSR 0x10 +#define PMERRLOC_ELSR_BUSY BIT(0) + +#define ATMEL_PMERRLOC_ELIER 0x14 +#define ATMEL_PMERRLOC_ELIDR 0x18 +#define ATMEL_PMERRLOC_ELIMR 0x1c +#define ATMEL_PMERRLOC_ELISR 0x20 +#define PMERRLOC_ERR_NUM_MASK GENMASK(12, 8) +#define PMERRLOC_CALC_DONE BIT(0) + +#define ATMEL_PMERRLOC_SIGMA(x) (((x) * 0x4) + 0x28) + +#define ATMEL_PMERRLOC_EL(offs, x) (((x) * 0x4) + (offs)) + +struct atmel_pmecc_gf_tables { + u16 *alpha_to; + u16 *index_of; +}; + +struct atmel_pmecc_caps { + const int *strengths; + int nstrengths; + int el_offset; + bool correct_erased_chunks; +}; + +struct atmel_pmecc { + struct device *dev; + const struct atmel_pmecc_caps *caps; + + struct { + void __iomem *base; + void __iomem *errloc; + } regs; + + struct mutex lock; +}; + +struct atmel_pmecc_user_conf_cache { + u32 cfg; + u32 sarea; + u32 saddr; + u32 eaddr; +}; + +struct atmel_pmecc_user { + struct atmel_pmecc_user_conf_cache cache; + struct atmel_pmecc *pmecc; + const struct atmel_pmecc_gf_tables *gf_tables; + int eccbytes; + s16 *partial_syn; + s16 *si; + s16 *lmu; + s16 *smu; + s32 *mu; + s32 *dmu; + s32 *delta; + u32 isr; +}; + +static DEFINE_MUTEX(pmecc_gf_tables_lock); +static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_512; +static const struct atmel_pmecc_gf_tables 
*pmecc_gf_tables_1024; + +static inline int deg(unsigned int poly) +{ + /* polynomial degree is the most-significant bit index */ + return fls(poly) - 1; +} + +static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly, + struct atmel_pmecc_gf_tables *gf_tables) +{ + unsigned int i, x = 1; + const unsigned int k = BIT(deg(poly)); + unsigned int nn = BIT(mm) - 1; + + /* primitive polynomial must be of degree m */ + if (k != (1u << mm)) + return -EINVAL; + + for (i = 0; i < nn; i++) { + gf_tables->alpha_to[i] = x; + gf_tables->index_of[x] = i; + if (i && (x == 1)) + /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ + return -EINVAL; + x <<= 1; + if (x & k) + x ^= poly; + } + gf_tables->alpha_to[nn] = 1; + gf_tables->index_of[0] = 0; + + return 0; +} + +static const struct atmel_pmecc_gf_tables * +atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req) +{ + struct atmel_pmecc_gf_tables *gf_tables; + unsigned int poly, degree, table_size; + int ret; + + if (req->ecc.sectorsize == 512) { + degree = PMECC_GF_DIMENSION_13; + poly = PMECC_GF_13_PRIMITIVE_POLY; + table_size = PMECC_LOOKUP_TABLE_SIZE_512; + } else { + degree = PMECC_GF_DIMENSION_14; + poly = PMECC_GF_14_PRIMITIVE_POLY; + table_size = PMECC_LOOKUP_TABLE_SIZE_1024; + } + + gf_tables = kzalloc(sizeof(*gf_tables) + + (2 * table_size * sizeof(u16)), + GFP_KERNEL); + if (!gf_tables) + return ERR_PTR(-ENOMEM); + + gf_tables->alpha_to = (void *)(gf_tables + 1); + gf_tables->index_of = gf_tables->alpha_to + table_size; + + ret = atmel_pmecc_build_gf_tables(degree, poly, gf_tables); + if (ret) { + kfree(gf_tables); + return ERR_PTR(ret); + } + + return gf_tables; +} + +static const struct atmel_pmecc_gf_tables * +atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req) +{ + const struct atmel_pmecc_gf_tables **gf_tables, *ret; + + mutex_lock(&pmecc_gf_tables_lock); + if (req->ecc.sectorsize == 512) + gf_tables = &pmecc_gf_tables_512; + else + gf_tables = &pmecc_gf_tables_1024; + + ret = *gf_tables; + + if (!ret) { + ret = atmel_pmecc_create_gf_tables(req); + if (!IS_ERR(ret)) + *gf_tables = ret; + } + mutex_unlock(&pmecc_gf_tables_lock); + + return ret; +} + +static int atmel_pmecc_prepare_user_req(struct atmel_pmecc *pmecc, + struct atmel_pmecc_user_req *req) +{ + int i, max_eccbytes, eccbytes = 0, eccstrength = 0; + + if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0) + return -EINVAL; + + if (req->ecc.ooboffset >= 0 && + req->ecc.ooboffset + req->ecc.bytes > req->oobsize) + return -EINVAL; + + if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) { + if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH) + return -EINVAL; + + if (req->pagesize > 512) + req->ecc.sectorsize = 1024; + else + req->ecc.sectorsize = 512; + } + + if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024) + return -EINVAL; + + if (req->pagesize % req->ecc.sectorsize) + return -EINVAL; + + req->ecc.nsectors = req->pagesize / req->ecc.sectorsize; + + max_eccbytes = req->ecc.bytes; + + for (i = 0; i < pmecc->caps->nstrengths; i++) { + int nbytes, strength = pmecc->caps->strengths[i]; + + if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH && + strength < req->ecc.strength) + continue; + + nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize), + 8); + nbytes *= req->ecc.nsectors; + + if (nbytes > max_eccbytes) + break; + + eccstrength = strength; + eccbytes = nbytes; + + if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH) + break; + } + + if (!eccstrength) + return -EINVAL; + + 
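The loop above is easier to check with concrete numbers. Each sector of a BCH code over GF(2^m) needs ceil(m * t / 8) bytes of ECC, which is what the DIV_ROUND_UP() expression computes, with fls(8 * 512) = 13 and fls(8 * 1024) = 14 standing in for m. A small illustrative helper plus a worked case (the page/OOB geometry is assumed, not taken from this patch):

/* Illustration only: ECC bytes per sector for strength t over GF(2^m). */
static inline unsigned int example_pmecc_ecc_bytes(unsigned int m,
						   unsigned int t)
{
	return DIV_ROUND_UP(m * t, 8);	/* DIV_ROUND_UP() is in kernel.h */
}

/*
 * Worked case for a hypothetical 4096-byte page with 224 bytes of OOB:
 * the NAND side requests an ECC budget of oobsize - 2 = 222 bytes and
 * ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH with 1024-byte sectors (nsectors = 4).
 *
 *   t = 24: example_pmecc_ecc_bytes(14, 24) = 42, 4 * 42 = 168 <= 222
 *   t = 32: example_pmecc_ecc_bytes(14, 32) = 56, 4 * 56 = 224 >  222
 *
 * so the loop above settles on eccstrength = 24 and eccbytes = 168.
 */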
req->ecc.bytes = eccbytes; + req->ecc.strength = eccstrength; + + if (req->ecc.ooboffset < 0) + req->ecc.ooboffset = req->oobsize - eccbytes; + + return 0; +} + +struct atmel_pmecc_user * +atmel_pmecc_create_user(struct atmel_pmecc *pmecc, + struct atmel_pmecc_user_req *req) +{ + struct atmel_pmecc_user *user; + const struct atmel_pmecc_gf_tables *gf_tables; + int strength, size, ret; + + ret = atmel_pmecc_prepare_user_req(pmecc, req); + if (ret) + return ERR_PTR(ret); + + size = sizeof(*user); + size = ALIGN(size, sizeof(u16)); + /* Reserve space for partial_syn, si and smu */ + size += ((2 * req->ecc.strength) + 1) * sizeof(u16) * + (2 + req->ecc.strength + 2); + /* Reserve space for lmu. */ + size += (req->ecc.strength + 1) * sizeof(u16); + /* Reserve space for mu, dmu and delta. */ + size = ALIGN(size, sizeof(s32)); + size += (req->ecc.strength + 1) * sizeof(s32); + + user = kzalloc(size, GFP_KERNEL); + if (!user) + return ERR_PTR(-ENOMEM); + + user->pmecc = pmecc; + + user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16)); + user->si = user->partial_syn + ((2 * req->ecc.strength) + 1); + user->lmu = user->si + ((2 * req->ecc.strength) + 1); + user->smu = user->lmu + (req->ecc.strength + 1); + user->mu = (s32 *)PTR_ALIGN(user->smu + + (((2 * req->ecc.strength) + 1) * + (req->ecc.strength + 2)), + sizeof(s32)); + user->dmu = user->mu + req->ecc.strength + 1; + user->delta = user->dmu + req->ecc.strength + 1; + + gf_tables = atmel_pmecc_get_gf_tables(req); + if (IS_ERR(gf_tables)) { + kfree(user); + return ERR_CAST(gf_tables); + } + + user->gf_tables = gf_tables; + + user->eccbytes = req->ecc.bytes / req->ecc.nsectors; + + for (strength = 0; strength < pmecc->caps->nstrengths; strength++) { + if (pmecc->caps->strengths[strength] == req->ecc.strength) + break; + } + + user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) | + PMECC_CFG_NSECTORS(req->ecc.nsectors); + + if (req->ecc.sectorsize == 1024) + user->cache.cfg |= PMECC_CFG_SECTOR1024; + + user->cache.sarea = req->oobsize - 1; + user->cache.saddr = req->ecc.ooboffset; + user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1; + + return user; +} +EXPORT_SYMBOL_GPL(atmel_pmecc_create_user); + +void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user) +{ + kfree(user); +} +EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user); + +static int get_strength(struct atmel_pmecc_user *user) +{ + const int *strengths = user->pmecc->caps->strengths; + + return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK]; +} + +static int get_sectorsize(struct atmel_pmecc_user *user) +{ + return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512; +} + +static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector) +{ + int strength = get_strength(user); + u32 value; + int i; + + /* Fill odd syndromes */ + for (i = 0; i < strength; i++) { + value = readl_relaxed(user->pmecc->regs.base + + ATMEL_PMECC_REM(sector, i / 2)); + if (i & 1) + value >>= 16; + + user->partial_syn[(2 * i) + 1] = value; + } +} + +static void atmel_pmecc_substitute(struct atmel_pmecc_user *user) +{ + int degree = get_sectorsize(user) == 512 ? 
13 : 14; + int cw_len = BIT(degree) - 1; + int strength = get_strength(user); + s16 *alpha_to = user->gf_tables->alpha_to; + s16 *index_of = user->gf_tables->index_of; + s16 *partial_syn = user->partial_syn; + s16 *si; + int i, j; + + /* + * si[] is a table that holds the current syndrome value, + * an element of that table belongs to the field + */ + si = user->si; + + memset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1)); + + /* Computation 2t syndromes based on S(x) */ + /* Odd syndromes */ + for (i = 1; i < 2 * strength; i += 2) { + for (j = 0; j < degree; j++) { + if (partial_syn[i] & BIT(j)) + si[i] = alpha_to[i * j] ^ si[i]; + } + } + /* Even syndrome = (Odd syndrome) ** 2 */ + for (i = 2, j = 1; j <= strength; i = ++j << 1) { + if (si[j] == 0) { + si[i] = 0; + } else { + s16 tmp; + + tmp = index_of[si[j]]; + tmp = (tmp * 2) % cw_len; + si[i] = alpha_to[tmp]; + } + } +} + +static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user) +{ + s16 *lmu = user->lmu; + s16 *si = user->si; + s32 *mu = user->mu; + s32 *dmu = user->dmu; + s32 *delta = user->delta; + int degree = get_sectorsize(user) == 512 ? 13 : 14; + int cw_len = BIT(degree) - 1; + int strength = get_strength(user); + int num = 2 * strength + 1; + s16 *index_of = user->gf_tables->index_of; + s16 *alpha_to = user->gf_tables->alpha_to; + int i, j, k; + u32 dmu_0_count, tmp; + s16 *smu = user->smu; + + /* index of largest delta */ + int ro; + int largest; + int diff; + + dmu_0_count = 0; + + /* First Row */ + + /* Mu */ + mu[0] = -1; + + memset(smu, 0, sizeof(s16) * num); + smu[0] = 1; + + /* discrepancy set to 1 */ + dmu[0] = 1; + /* polynom order set to 0 */ + lmu[0] = 0; + delta[0] = (mu[0] * 2 - lmu[0]) >> 1; + + /* Second Row */ + + /* Mu */ + mu[1] = 0; + /* Sigma(x) set to 1 */ + memset(&smu[num], 0, sizeof(s16) * num); + smu[num] = 1; + + /* discrepancy set to S1 */ + dmu[1] = si[1]; + + /* polynom order set to 0 */ + lmu[1] = 0; + + delta[1] = (mu[1] * 2 - lmu[1]) >> 1; + + /* Init the Sigma(x) last row */ + memset(&smu[(strength + 1) * num], 0, sizeof(s16) * num); + + for (i = 1; i <= strength; i++) { + mu[i + 1] = i << 1; + /* Begin Computing Sigma (Mu+1) and L(mu) */ + /* check if discrepancy is set to 0 */ + if (dmu[i] == 0) { + dmu_0_count++; + + tmp = ((strength - (lmu[i] >> 1) - 1) / 2); + if ((strength - (lmu[i] >> 1) - 1) & 0x1) + tmp += 2; + else + tmp += 1; + + if (dmu_0_count == tmp) { + for (j = 0; j <= (lmu[i] >> 1) + 1; j++) + smu[(strength + 1) * num + j] = + smu[i * num + j]; + + lmu[strength + 1] = lmu[i]; + return; + } + + /* copy polynom */ + for (j = 0; j <= lmu[i] >> 1; j++) + smu[(i + 1) * num + j] = smu[i * num + j]; + + /* copy previous polynom order to the next */ + lmu[i + 1] = lmu[i]; + } else { + ro = 0; + largest = -1; + /* find largest delta with dmu != 0 */ + for (j = 0; j < i; j++) { + if ((dmu[j]) && (delta[j] > largest)) { + largest = delta[j]; + ro = j; + } + } + + /* compute difference */ + diff = (mu[i] - mu[ro]); + + /* Compute degree of the new smu polynomial */ + if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff)) + lmu[i + 1] = lmu[i]; + else + lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2; + + /* Init smu[i+1] with 0 */ + for (k = 0; k < num; k++) + smu[(i + 1) * num + k] = 0; + + /* Compute smu[i+1] */ + for (k = 0; k <= lmu[ro] >> 1; k++) { + s16 a, b, c; + + if (!(smu[ro * num + k] && dmu[i])) + continue; + + a = index_of[dmu[i]]; + b = index_of[dmu[ro]]; + c = index_of[smu[ro * num + k]]; + tmp = a + (cw_len - b) + c; + a = alpha_to[tmp % cw_len]; + smu[(i + 1) * num + (k + diff)] 
= a; + } + + for (k = 0; k <= lmu[i] >> 1; k++) + smu[(i + 1) * num + k] ^= smu[i * num + k]; + } + + /* End Computing Sigma (Mu+1) and L(mu) */ + /* In either case compute delta */ + delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1; + + /* Do not compute discrepancy for the last iteration */ + if (i >= strength) + continue; + + for (k = 0; k <= (lmu[i + 1] >> 1); k++) { + tmp = 2 * (i - 1); + if (k == 0) { + dmu[i + 1] = si[tmp + 3]; + } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) { + s16 a, b, c; + + a = index_of[smu[(i + 1) * num + k]]; + b = si[2 * (i - 1) + 3 - k]; + c = index_of[b]; + tmp = a + c; + tmp %= cw_len; + dmu[i + 1] = alpha_to[tmp] ^ dmu[i + 1]; + } + } + } +} + +static int atmel_pmecc_err_location(struct atmel_pmecc_user *user) +{ + int sector_size = get_sectorsize(user); + int degree = sector_size == 512 ? 13 : 14; + struct atmel_pmecc *pmecc = user->pmecc; + int strength = get_strength(user); + int ret, roots_nbr, i, err_nbr = 0; + int num = (2 * strength) + 1; + s16 *smu = user->smu; + u32 val; + + writel(PMERRLOC_DISABLE, pmecc->regs.errloc + ATMEL_PMERRLOC_ELDIS); + + for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) { + writel_relaxed(smu[(strength + 1) * num + i], + pmecc->regs.errloc + ATMEL_PMERRLOC_SIGMA(i)); + err_nbr++; + } + + val = (err_nbr - 1) << 16; + if (sector_size == 1024) + val |= 1; + + writel(val, pmecc->regs.errloc + ATMEL_PMERRLOC_ELCFG); + writel((sector_size * 8) + (degree * strength), + pmecc->regs.errloc + ATMEL_PMERRLOC_ELEN); + + ret = readl_relaxed_poll_timeout(pmecc->regs.errloc + + ATMEL_PMERRLOC_ELISR, + val, val & PMERRLOC_CALC_DONE, 0, + PMECC_MAX_TIMEOUT_MS * 1000); + if (ret) { + dev_err(pmecc->dev, + "PMECC: Timeout to calculate error location.\n"); + return ret; + } + + roots_nbr = (val & PMERRLOC_ERR_NUM_MASK) >> 8; + /* Number of roots == degree of smu hence <= cap */ + if (roots_nbr == user->lmu[strength + 1] >> 1) + return err_nbr - 1; + + /* + * Number of roots does not match the degree of smu + * unable to correct error. 
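The error-locator programming above is easier to follow with concrete numbers; the values here are illustrative, not taken from this patch:

/*
 * Illustrative numbers: a 1024-byte sector over GF(2^14) with strength
 * t = 24, for which Berlekamp-Massey produced an error-locator polynomial
 * sigma of degree 5 (so six coefficients were written to the SIGMA
 * registers above):
 *
 *   ATMEL_PMERRLOC_ELCFG <- (5 << 16) | 1       five expected errors,
 *                                               1024-byte sector
 *   ATMEL_PMERRLOC_ELEN  <- 1024 * 8 + 14 * 24  = 8528 bits, i.e. the
 *                                               Chien search covers the
 *                                               data plus the m * t
 *                                               parity bits
 *
 * The sector is only considered corrected when the number of roots
 * reported in ELISR equals the degree of sigma; any mismatch ends in the
 * -EBADMSG return below.
 */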
+ */ + return -EBADMSG; +} + +int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector, + void *data, void *ecc) +{ + struct atmel_pmecc *pmecc = user->pmecc; + int sectorsize = get_sectorsize(user); + int eccbytes = user->eccbytes; + int i, nerrors; + + if (!(user->isr & BIT(sector))) + return 0; + + atmel_pmecc_gen_syndrome(user, sector); + atmel_pmecc_substitute(user); + atmel_pmecc_get_sigma(user); + + nerrors = atmel_pmecc_err_location(user); + if (nerrors < 0) + return nerrors; + + for (i = 0; i < nerrors; i++) { + const char *area; + int byte, bit; + u32 errpos; + u8 *ptr; + + errpos = readl_relaxed(pmecc->regs.errloc + + ATMEL_PMERRLOC_EL(pmecc->caps->el_offset, i)); + errpos--; + + byte = errpos / 8; + bit = errpos % 8; + + if (byte < sectorsize) { + ptr = data + byte; + area = "data"; + } else if (byte < sectorsize + eccbytes) { + ptr = ecc + byte - sectorsize; + area = "ECC"; + } else { + dev_dbg(pmecc->dev, + "Invalid errpos value (%d, max is %d)\n", + errpos, (sectorsize + eccbytes) * 8); + return -EINVAL; + } + + dev_dbg(pmecc->dev, + "Bit flip in %s area, byte %d: 0x%02x -> 0x%02x\n", + area, byte, *ptr, (unsigned int)(*ptr ^ BIT(bit))); + + *ptr ^= BIT(bit); + } + + return nerrors; +} +EXPORT_SYMBOL_GPL(atmel_pmecc_correct_sector); + +bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user) +{ + return user->pmecc->caps->correct_erased_chunks; +} +EXPORT_SYMBOL_GPL(atmel_pmecc_correct_erased_chunks); + +void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user, + int sector, void *ecc) +{ + struct atmel_pmecc *pmecc = user->pmecc; + u8 *ptr = ecc; + int i; + + for (i = 0; i < user->eccbytes; i++) + ptr[i] = readb_relaxed(pmecc->regs.base + + ATMEL_PMECC_ECC(sector, i)); +} +EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); + +int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) +{ + struct atmel_pmecc *pmecc = user->pmecc; + u32 cfg; + + if (op != NAND_ECC_READ && op != NAND_ECC_WRITE) { + dev_err(pmecc->dev, "Bad ECC operation!"); + return -EINVAL; + } + + mutex_lock(&user->pmecc->lock); + + cfg = user->cache.cfg; + if (op == NAND_ECC_WRITE) + cfg |= PMECC_CFG_WRITE_OP; + else + cfg |= PMECC_CFG_AUTO_ENABLE; + + writel(cfg, pmecc->regs.base + ATMEL_PMECC_CFG); + writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA); + writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR); + writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR); + + writel(PMECC_CTRL_ENABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); + writel(PMECC_CTRL_DATA, pmecc->regs.base + ATMEL_PMECC_CTRL); + + return 0; +} +EXPORT_SYMBOL_GPL(atmel_pmecc_enable); + +void atmel_pmecc_disable(struct atmel_pmecc_user *user) +{ + struct atmel_pmecc *pmecc = user->pmecc; + + writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); + writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); + mutex_unlock(&user->pmecc->lock); +} +EXPORT_SYMBOL_GPL(atmel_pmecc_disable); + +int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user) +{ + struct atmel_pmecc *pmecc = user->pmecc; + u32 status; + int ret; + + ret = readl_relaxed_poll_timeout(pmecc->regs.base + + ATMEL_PMECC_SR, + status, !(status & PMECC_SR_BUSY), 0, + PMECC_MAX_TIMEOUT_MS * 1000); + if (ret) { + dev_err(pmecc->dev, + "Timeout while waiting for PMECC ready.\n"); + return ret; + } + + user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR); + + return 0; +} +EXPORT_SYMBOL_GPL(atmel_pmecc_wait_rdy); + +static struct atmel_pmecc *atmel_pmecc_create(struct platform_device 
*pdev, + const struct atmel_pmecc_caps *caps, + int pmecc_res_idx, int errloc_res_idx) +{ + struct device *dev = &pdev->dev; + struct atmel_pmecc *pmecc; + struct resource *res; + + pmecc = devm_kzalloc(dev, sizeof(*pmecc), GFP_KERNEL); + if (!pmecc) + return ERR_PTR(-ENOMEM); + + pmecc->caps = caps; + pmecc->dev = dev; + mutex_init(&pmecc->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, pmecc_res_idx); + pmecc->regs.base = devm_ioremap_resource(dev, res); + if (IS_ERR(pmecc->regs.base)) + return ERR_CAST(pmecc->regs.base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, errloc_res_idx); + pmecc->regs.errloc = devm_ioremap_resource(dev, res); + if (IS_ERR(pmecc->regs.errloc)) + return ERR_CAST(pmecc->regs.errloc); + + /* Disable all interrupts before registering the PMECC handler. */ + writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); + + /* Reset the ECC engine */ + writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); + writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); + + return pmecc; +} + +static void devm_atmel_pmecc_put(struct device *dev, void *res) +{ + struct atmel_pmecc **pmecc = res; + + put_device((*pmecc)->dev); +} + +static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev, + struct device_node *np) +{ + struct platform_device *pdev; + struct atmel_pmecc *pmecc, **ptr; + + pdev = of_find_device_by_node(np); + if (!pdev || !platform_get_drvdata(pdev)) + return ERR_PTR(-EPROBE_DEFER); + + ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + get_device(&pdev->dev); + pmecc = platform_get_drvdata(pdev); + + *ptr = pmecc; + + devres_add(userdev, ptr); + + return pmecc; +} + +static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 }; + +static struct atmel_pmecc_caps at91sam9g45_caps = { + .strengths = atmel_pmecc_strengths, + .nstrengths = 5, + .el_offset = 0x8c, +}; + +static struct atmel_pmecc_caps sama5d4_caps = { + .strengths = atmel_pmecc_strengths, + .nstrengths = 5, + .el_offset = 0x8c, + .correct_erased_chunks = true, +}; + +static struct atmel_pmecc_caps sama5d2_caps = { + .strengths = atmel_pmecc_strengths, + .nstrengths = 6, + .el_offset = 0xac, + .correct_erased_chunks = true, +}; + +static const struct of_device_id atmel_pmecc_legacy_match[] = { + { .compatible = "atmel,sama5d4-nand", &sama5d4_caps }, + { .compatible = "atmel,sama5d2-nand", &sama5d2_caps }, + { /* sentinel */ } +}; + +struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) +{ + struct atmel_pmecc *pmecc; + struct device_node *np; + + if (!userdev) + return ERR_PTR(-EINVAL); + + if (!userdev->of_node) + return NULL; + + np = of_parse_phandle(userdev->of_node, "ecc-engine", 0); + if (np) { + pmecc = atmel_pmecc_get_by_node(userdev, np); + of_node_put(np); + } else { + /* + * Support old DT bindings: in this case the PMECC iomem + * resources are directly defined in the user pdev at position + * 1 and 2. Extract all relevant information from there. + */ + struct platform_device *pdev = to_platform_device(userdev); + const struct atmel_pmecc_caps *caps; + + /* No PMECC engine available. */ + if (!of_property_read_bool(userdev->of_node, + "atmel,has-pmecc")) + return NULL; + + caps = &at91sam9g45_caps; + + /* + * Try to find the NFC subnode and extract the associated caps + * from there. 
+ */ + np = of_find_compatible_node(userdev->of_node, NULL, + "atmel,sama5d3-nfc"); + if (np) { + const struct of_device_id *match; + + match = of_match_node(atmel_pmecc_legacy_match, np); + if (match && match->data) + caps = match->data; + + of_node_put(np); + } + + pmecc = atmel_pmecc_create(pdev, caps, 1, 2); + } + + return pmecc; +} +EXPORT_SYMBOL(devm_atmel_pmecc_get); + +static const struct of_device_id atmel_pmecc_match[] = { + { .compatible = "atmel,at91sam9g45-pmecc", &at91sam9g45_caps }, + { .compatible = "atmel,sama5d4-pmecc", &sama5d4_caps }, + { .compatible = "atmel,sama5d2-pmecc", &sama5d2_caps }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atmel_pmecc_match); + +static int atmel_pmecc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct atmel_pmecc_caps *caps; + struct atmel_pmecc *pmecc; + + caps = of_device_get_match_data(&pdev->dev); + if (!caps) { + dev_err(dev, "Invalid caps\n"); + return -EINVAL; + } + + pmecc = atmel_pmecc_create(pdev, caps, 0, 1); + if (IS_ERR(pmecc)) + return PTR_ERR(pmecc); + + platform_set_drvdata(pdev, pmecc); + + return 0; +} + +static struct platform_driver atmel_pmecc_driver = { + .driver = { + .name = "atmel-pmecc", + .of_match_table = of_match_ptr(atmel_pmecc_match), + }, + .probe = atmel_pmecc_probe, +}; +module_platform_driver(atmel_pmecc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); +MODULE_DESCRIPTION("PMECC engine driver"); +MODULE_ALIAS("platform:atmel_pmecc"); diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h new file mode 100644 index 000000000000..a8ddbfca2ea5 --- /dev/null +++ b/drivers/mtd/nand/atmel/pmecc.h @@ -0,0 +1,73 @@ +/* + * © Copyright 2016 ATMEL + * © Copyright 2016 Free Electrons + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * Derived from the atmel_nand.c driver which contained the following + * copyrights: + * + * Copyright © 2003 Rick Bronson + * + * Derived from drivers/mtd/nand/autcpu12.c + * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de) + * + * Derived from drivers/mtd/spia.c + * Copyright © 2000 Steven J. Hill (sjhill@cotw.com) + * + * + * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 + * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007 + * + * Derived from Das U-Boot source code + * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) + * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas + * + * Add Programmable Multibit ECC support for various AT91 SoC + * © Copyright 2012 ATMEL, Hong Xu + * + * Add Nand Flash Controller support for SAMA5 SoC + * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
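The header below is the whole contract between this engine driver and its NAND controller user. A condensed, hypothetical sketch of the intended read-path usage (error handling and the erased-page case trimmed; the real caller is atmel_nand_pmecc_correct_data() and friends in nand-controller.c):

#include <linux/kernel.h>
#include <linux/mtd/nand.h>

#include "pmecc.h"

/*
 * Hypothetical, condensed read-path flow for one page; the real driver
 * splits this across atmel_nand_pmecc_enable(), _correct_data() and
 * _disable() in nand-controller.c.
 */
static int example_pmecc_read_correct(struct atmel_pmecc_user *user,
				      u8 *data, u8 *oob_ecc, int sectorsize,
				      int eccbytes, int nsectors)
{
	int sector, ret, max_bitflips = 0;

	/* Arm the engine for a read before the data cycles are issued. */
	ret = atmel_pmecc_enable(user, NAND_ECC_READ);
	if (ret)
		return ret;

	/* ... issue READ0/READSTART and transfer the page here ... */

	/* Wait for the per-sector status, then correct sector by sector. */
	ret = atmel_pmecc_wait_rdy(user);
	if (ret)
		goto disable;

	for (sector = 0; sector < nsectors; sector++) {
		ret = atmel_pmecc_correct_sector(user, sector,
						 data + sector * sectorsize,
						 oob_ecc + sector * eccbytes);
		if (ret < 0)	/* -EBADMSG: uncorrectable sector */
			goto disable;

		max_bitflips = max(max_bitflips, ret);
	}

	ret = max_bitflips;

disable:
	atmel_pmecc_disable(user);
	return ret;
}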
+ * + */ + +#ifndef ATMEL_PMECC_H +#define ATMEL_PMECC_H + +#define ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH 0 +#define ATMEL_PMECC_SECTOR_SIZE_AUTO 0 +#define ATMEL_PMECC_OOBOFFSET_AUTO -1 + +struct atmel_pmecc_user_req { + int pagesize; + int oobsize; + struct { + int strength; + int bytes; + int sectorsize; + int nsectors; + int ooboffset; + } ecc; +}; + +struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev); + +struct atmel_pmecc_user * +atmel_pmecc_create_user(struct atmel_pmecc *pmecc, + struct atmel_pmecc_user_req *req); +void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); + +int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); +void atmel_pmecc_disable(struct atmel_pmecc_user *user); +int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user); +int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector, + void *data, void *ecc); +bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user); +void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user, + int sector, void *ecc); + +#endif /* ATMEL_PMECC_H */ diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c deleted file mode 100644 index 9ebd5ecefea6..000000000000 --- a/drivers/mtd/nand/atmel_nand.c +++ /dev/null @@ -1,2479 +0,0 @@ -/* - * Copyright © 2003 Rick Bronson - * - * Derived from drivers/mtd/nand/autcpu12.c - * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de) - * - * Derived from drivers/mtd/spia.c - * Copyright © 2000 Steven J. Hill (sjhill@cotw.com) - * - * - * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 - * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007 - * - * Derived from Das U-Boot source code - * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) - * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas - * - * Add Programmable Multibit ECC support for various AT91 SoC - * © Copyright 2012 ATMEL, Hong Xu - * - * Add Nand Flash Controller support for SAMA5 SoC - * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include <linux/clk.h> -#include <linux/dma-mapping.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/platform_device.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/nand.h> -#include <linux/mtd/partitions.h> - -#include <linux/delay.h> -#include <linux/dmaengine.h> -#include <linux/gpio.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/platform_data/atmel.h> - -static int use_dma = 1; -module_param(use_dma, int, 0); - -static int on_flash_bbt = 0; -module_param(on_flash_bbt, int, 0); - -/* Register access macros */ -#define ecc_readl(add, reg) \ - __raw_readl(add + ATMEL_ECC_##reg) -#define ecc_writel(add, reg, value) \ - __raw_writel((value), add + ATMEL_ECC_##reg) - -#include "atmel_nand_ecc.h" /* Hardware ECC registers */ -#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */ - -struct atmel_nand_caps { - bool pmecc_correct_erase_page; - uint8_t pmecc_max_correction; -}; - -/* - * oob layout for large page size - * bad block info is on bytes 0 and 1 - * the bytes have to be consecutives to avoid - * several NAND_CMD_RNDOUT during read - * - * oob layout for small page size - * bad block info is on bytes 4 and 5 - * the bytes have to be consecutives to avoid - * several NAND_CMD_RNDOUT during read - */ -static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - if (section) - return -ERANGE; - - oobregion->length = 4; - oobregion->offset = 0; - - return 0; -} - -static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - if (section) - return -ERANGE; - - oobregion->offset = 6; - oobregion->length = mtd->oobsize - oobregion->offset; - - return 0; -} - -static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = { - .ecc = atmel_ooblayout_ecc_sp, - .free = atmel_ooblayout_free_sp, -}; - -struct atmel_nfc { - void __iomem *base_cmd_regs; - void __iomem *hsmc_regs; - void *sram_bank0; - dma_addr_t sram_bank0_phys; - bool use_nfc_sram; - bool write_by_sram; - - struct clk *clk; - - bool is_initialized; - struct completion comp_ready; - struct completion comp_cmd_done; - struct completion comp_xfer_done; - - /* Point to the sram bank which include readed data via NFC */ - void *data_in_sram; - bool will_write_sram; -}; -static struct atmel_nfc nand_nfc; - -struct atmel_nand_host { - struct nand_chip nand_chip; - void __iomem *io_base; - dma_addr_t io_phys; - struct atmel_nand_data board; - struct device *dev; - void __iomem *ecc; - - struct completion comp; - struct dma_chan *dma_chan; - - struct atmel_nfc *nfc; - - const struct atmel_nand_caps *caps; - bool has_pmecc; - u8 pmecc_corr_cap; - u16 pmecc_sector_size; - bool has_no_lookup_table; - u32 pmecc_lookup_table_offset; - u32 pmecc_lookup_table_offset_512; - u32 pmecc_lookup_table_offset_1024; - - int pmecc_degree; /* Degree of remainders */ - int pmecc_cw_len; /* Length of codeword */ - - void __iomem *pmerrloc_base; - void __iomem *pmerrloc_el_base; - void __iomem *pmecc_rom_base; - - /* lookup table for alpha_to and index_of */ - void __iomem *pmecc_alpha_to; - void __iomem *pmecc_index_of; - - /* data for pmecc computation */ - int16_t *pmecc_partial_syn; - int16_t *pmecc_si; - int16_t *pmecc_smu; /* Sigma table */ - int16_t *pmecc_lmu; /* polynomal order */ - int *pmecc_mu; - int *pmecc_dmu; - int *pmecc_delta; -}; - -/* - * Enable NAND. 
- */ -static void atmel_nand_enable(struct atmel_nand_host *host) -{ - if (gpio_is_valid(host->board.enable_pin)) - gpio_set_value(host->board.enable_pin, 0); -} - -/* - * Disable NAND. - */ -static void atmel_nand_disable(struct atmel_nand_host *host) -{ - if (gpio_is_valid(host->board.enable_pin)) - gpio_set_value(host->board.enable_pin, 1); -} - -/* - * Hardware specific access to control-lines - */ -static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - if (ctrl & NAND_CTRL_CHANGE) { - if (ctrl & NAND_NCE) - atmel_nand_enable(host); - else - atmel_nand_disable(host); - } - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, host->io_base + (1 << host->board.cle)); - else - writeb(cmd, host->io_base + (1 << host->board.ale)); -} - -/* - * Read the Device Ready pin. - */ -static int atmel_nand_device_ready(struct mtd_info *mtd) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - return gpio_get_value(host->board.rdy_pin) ^ - !!host->board.rdy_pin_active_low; -} - -/* Set up for hardware ready pin and enable pin. */ -static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); - int res = 0; - - if (gpio_is_valid(host->board.rdy_pin)) { - res = devm_gpio_request(host->dev, - host->board.rdy_pin, "nand_rdy"); - if (res < 0) { - dev_err(host->dev, - "can't request rdy gpio %d\n", - host->board.rdy_pin); - return res; - } - - res = gpio_direction_input(host->board.rdy_pin); - if (res < 0) { - dev_err(host->dev, - "can't request input direction rdy gpio %d\n", - host->board.rdy_pin); - return res; - } - - chip->dev_ready = atmel_nand_device_ready; - } - - if (gpio_is_valid(host->board.enable_pin)) { - res = devm_gpio_request(host->dev, - host->board.enable_pin, "nand_enable"); - if (res < 0) { - dev_err(host->dev, - "can't request enable gpio %d\n", - host->board.enable_pin); - return res; - } - - res = gpio_direction_output(host->board.enable_pin, 1); - if (res < 0) { - dev_err(host->dev, - "can't request output direction enable gpio %d\n", - host->board.enable_pin); - return res; - } - } - - return res; -} - -/* - * Minimal-overhead PIO for data access. 
- */ -static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) { - memcpy(buf, host->nfc->data_in_sram, len); - host->nfc->data_in_sram += len; - } else { - __raw_readsb(nand_chip->IO_ADDR_R, buf, len); - } -} - -static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) { - memcpy(buf, host->nfc->data_in_sram, len); - host->nfc->data_in_sram += len; - } else { - __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); - } -} - -static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - - __raw_writesb(nand_chip->IO_ADDR_W, buf, len); -} - -static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - - __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); -} - -static void dma_complete_func(void *completion) -{ - complete(completion); -} - -static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank) -{ - /* NFC only has two banks. Must be 0 or 1 */ - if (bank > 1) - return -EINVAL; - - if (bank) { - struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); - - /* Only for a 2k-page or lower flash, NFC can handle 2 banks */ - if (mtd->writesize > 2048) - return -EINVAL; - nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1); - } else { - nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0); - } - - return 0; -} - -static uint nfc_get_sram_off(struct atmel_nand_host *host) -{ - if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1) - return NFC_SRAM_BANK1_OFFSET; - else - return 0; -} - -static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host) -{ - if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1) - return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET; - else - return host->nfc->sram_bank0_phys; -} - -static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, - int is_read) -{ - struct dma_device *dma_dev; - enum dma_ctrl_flags flags; - dma_addr_t dma_src_addr, dma_dst_addr, phys_addr; - struct dma_async_tx_descriptor *tx = NULL; - dma_cookie_t cookie; - struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); - void *p = buf; - int err = -EIO; - enum dma_data_direction dir = is_read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; - struct atmel_nfc *nfc = host->nfc; - - if (buf >= high_memory) - goto err_buf; - - dma_dev = host->dma_chan->device; - - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; - - phys_addr = dma_map_single(dma_dev->dev, p, len, dir); - if (dma_mapping_error(dma_dev->dev, phys_addr)) { - dev_err(host->dev, "Failed to dma_map_single\n"); - goto err_buf; - } - - if (is_read) { - if (nfc && nfc->data_in_sram) - dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram - - (nfc->sram_bank0 + nfc_get_sram_off(host))); - else - dma_src_addr = host->io_phys; - - dma_dst_addr = phys_addr; - } else { - dma_src_addr = phys_addr; - - if (nfc && nfc->write_by_sram) - dma_dst_addr = nfc_sram_phys(host); - else - dma_dst_addr = host->io_phys; - } - - tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, - dma_src_addr, len, flags); - if (!tx) { - dev_err(host->dev, "Failed to prepare DMA memcpy\n"); - goto err_dma; - } - - init_completion(&host->comp); - tx->callback = dma_complete_func; - tx->callback_param = &host->comp; - - cookie = tx->tx_submit(tx); - if (dma_submit_error(cookie)) { - dev_err(host->dev, "Failed to do DMA tx_submit\n"); - goto err_dma; - } - - dma_async_issue_pending(host->dma_chan); - wait_for_completion(&host->comp); - - if (is_read && nfc && nfc->data_in_sram) - /* After read data from SRAM, need to increase the position */ - nfc->data_in_sram += len; - - err = 0; - -err_dma: - dma_unmap_single(dma_dev->dev, phys_addr, len, dir); -err_buf: - if (err != 0) - dev_dbg(host->dev, "Fall back to CPU I/O\n"); - return err; -} - -static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - - if (use_dma && len > mtd->oobsize) - /* only use DMA for bigger than oob size: better performances */ - if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) - return; - - if (chip->options & NAND_BUSWIDTH_16) - atmel_read_buf16(mtd, buf, len); - else - atmel_read_buf8(mtd, buf, len); -} - -static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - - if (use_dma && len > mtd->oobsize) - /* only use DMA for bigger than oob size: better performances */ - if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) - return; - - if (chip->options & NAND_BUSWIDTH_16) - atmel_write_buf16(mtd, buf, len); - else - atmel_write_buf8(mtd, buf, len); -} - -/* - * Return number of ecc bytes per sector according to sector size and - * correction capability - * - * Following table shows what at91 PMECC supported: - * Correction Capability Sector_512_bytes Sector_1024_bytes - * ===================== ================ ================= - * 2-bits 4-bytes 4-bytes - * 4-bits 7-bytes 7-bytes - * 8-bits 13-bytes 14-bytes - * 12-bits 20-bytes 21-bytes - * 24-bits 39-bytes 42-bytes - * 32-bits 52-bytes 56-bytes - */ -static int pmecc_get_ecc_bytes(int cap, int sector_size) -{ - int m = 12 + sector_size / 512; - return (m * cap + 7) / 8; -} - -static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) -{ - int table_size; - - table_size = host->pmecc_sector_size == 512 ? 
- PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024; - - return host->pmecc_rom_base + host->pmecc_lookup_table_offset + - table_size * sizeof(int16_t); -} - -static int pmecc_data_alloc(struct atmel_nand_host *host) -{ - const int cap = host->pmecc_corr_cap; - int size; - - size = (2 * cap + 1) * sizeof(int16_t); - host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL); - host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL); - host->pmecc_lmu = devm_kzalloc(host->dev, - (cap + 1) * sizeof(int16_t), GFP_KERNEL); - host->pmecc_smu = devm_kzalloc(host->dev, - (cap + 2) * size, GFP_KERNEL); - - size = (cap + 1) * sizeof(int); - host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL); - host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL); - host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL); - - if (!host->pmecc_partial_syn || - !host->pmecc_si || - !host->pmecc_lmu || - !host->pmecc_smu || - !host->pmecc_mu || - !host->pmecc_dmu || - !host->pmecc_delta) - return -ENOMEM; - - return 0; -} - -static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - int i; - uint32_t value; - - /* Fill odd syndromes */ - for (i = 0; i < host->pmecc_corr_cap; i++) { - value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2); - if (i & 1) - value >>= 16; - value &= 0xffff; - host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value; - } -} - -static void pmecc_substitute(struct mtd_info *mtd) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - int16_t __iomem *alpha_to = host->pmecc_alpha_to; - int16_t __iomem *index_of = host->pmecc_index_of; - int16_t *partial_syn = host->pmecc_partial_syn; - const int cap = host->pmecc_corr_cap; - int16_t *si; - int i, j; - - /* si[] is a table that holds the current syndrome value, - * an element of that table belongs to the field - */ - si = host->pmecc_si; - - memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1)); - - /* Computation 2t syndromes based on S(x) */ - /* Odd syndromes */ - for (i = 1; i < 2 * cap; i += 2) { - for (j = 0; j < host->pmecc_degree; j++) { - if (partial_syn[i] & ((unsigned short)0x1 << j)) - si[i] = readw_relaxed(alpha_to + i * j) ^ si[i]; - } - } - /* Even syndrome = (Odd syndrome) ** 2 */ - for (i = 2, j = 1; j <= cap; i = ++j << 1) { - if (si[j] == 0) { - si[i] = 0; - } else { - int16_t tmp; - - tmp = readw_relaxed(index_of + si[j]); - tmp = (tmp * 2) % host->pmecc_cw_len; - si[i] = readw_relaxed(alpha_to + tmp); - } - } - - return; -} - -static void pmecc_get_sigma(struct mtd_info *mtd) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - int16_t *lmu = host->pmecc_lmu; - int16_t *si = host->pmecc_si; - int *mu = host->pmecc_mu; - int *dmu = host->pmecc_dmu; /* Discrepancy */ - int *delta = host->pmecc_delta; /* Delta order */ - int cw_len = host->pmecc_cw_len; - const int16_t cap = host->pmecc_corr_cap; - const int num = 2 * cap + 1; - int16_t __iomem *index_of = host->pmecc_index_of; - int16_t __iomem *alpha_to = host->pmecc_alpha_to; - int i, j, k; - uint32_t dmu_0_count, tmp; - int16_t *smu = host->pmecc_smu; - - /* index of largest delta */ - int ro; - int largest; - int diff; - - dmu_0_count = 0; - - /* First Row */ - - /* Mu */ - mu[0] = -1; - - memset(smu, 0, sizeof(int16_t) * num); - smu[0] = 1; - - /* discrepancy set 
to 1 */ - dmu[0] = 1; - /* polynom order set to 0 */ - lmu[0] = 0; - delta[0] = (mu[0] * 2 - lmu[0]) >> 1; - - /* Second Row */ - - /* Mu */ - mu[1] = 0; - /* Sigma(x) set to 1 */ - memset(&smu[num], 0, sizeof(int16_t) * num); - smu[num] = 1; - - /* discrepancy set to S1 */ - dmu[1] = si[1]; - - /* polynom order set to 0 */ - lmu[1] = 0; - - delta[1] = (mu[1] * 2 - lmu[1]) >> 1; - - /* Init the Sigma(x) last row */ - memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num); - - for (i = 1; i <= cap; i++) { - mu[i + 1] = i << 1; - /* Begin Computing Sigma (Mu+1) and L(mu) */ - /* check if discrepancy is set to 0 */ - if (dmu[i] == 0) { - dmu_0_count++; - - tmp = ((cap - (lmu[i] >> 1) - 1) / 2); - if ((cap - (lmu[i] >> 1) - 1) & 0x1) - tmp += 2; - else - tmp += 1; - - if (dmu_0_count == tmp) { - for (j = 0; j <= (lmu[i] >> 1) + 1; j++) - smu[(cap + 1) * num + j] = - smu[i * num + j]; - - lmu[cap + 1] = lmu[i]; - return; - } - - /* copy polynom */ - for (j = 0; j <= lmu[i] >> 1; j++) - smu[(i + 1) * num + j] = smu[i * num + j]; - - /* copy previous polynom order to the next */ - lmu[i + 1] = lmu[i]; - } else { - ro = 0; - largest = -1; - /* find largest delta with dmu != 0 */ - for (j = 0; j < i; j++) { - if ((dmu[j]) && (delta[j] > largest)) { - largest = delta[j]; - ro = j; - } - } - - /* compute difference */ - diff = (mu[i] - mu[ro]); - - /* Compute degree of the new smu polynomial */ - if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff)) - lmu[i + 1] = lmu[i]; - else - lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2; - - /* Init smu[i+1] with 0 */ - for (k = 0; k < num; k++) - smu[(i + 1) * num + k] = 0; - - /* Compute smu[i+1] */ - for (k = 0; k <= lmu[ro] >> 1; k++) { - int16_t a, b, c; - - if (!(smu[ro * num + k] && dmu[i])) - continue; - a = readw_relaxed(index_of + dmu[i]); - b = readw_relaxed(index_of + dmu[ro]); - c = readw_relaxed(index_of + smu[ro * num + k]); - tmp = a + (cw_len - b) + c; - a = readw_relaxed(alpha_to + tmp % cw_len); - smu[(i + 1) * num + (k + diff)] = a; - } - - for (k = 0; k <= lmu[i] >> 1; k++) - smu[(i + 1) * num + k] ^= smu[i * num + k]; - } - - /* End Computing Sigma (Mu+1) and L(mu) */ - /* In either case compute delta */ - delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1; - - /* Do not compute discrepancy for the last iteration */ - if (i >= cap) - continue; - - for (k = 0; k <= (lmu[i + 1] >> 1); k++) { - tmp = 2 * (i - 1); - if (k == 0) { - dmu[i + 1] = si[tmp + 3]; - } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) { - int16_t a, b, c; - a = readw_relaxed(index_of + - smu[(i + 1) * num + k]); - b = si[2 * (i - 1) + 3 - k]; - c = readw_relaxed(index_of + b); - tmp = a + c; - tmp %= cw_len; - dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^ - dmu[i + 1]; - } - } - } - - return; -} - -static int pmecc_err_location(struct mtd_info *mtd) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - unsigned long end_time; - const int cap = host->pmecc_corr_cap; - const int num = 2 * cap + 1; - int sector_size = host->pmecc_sector_size; - int err_nbr = 0; /* number of error */ - int roots_nbr; /* number of roots */ - int i; - uint32_t val; - int16_t *smu = host->pmecc_smu; - - pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE); - - for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) { - pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i, - smu[(cap + 1) * num + i]); - err_nbr++; - } - - val = (err_nbr - 1) << 16; - if (sector_size == 1024) - val |= 1; - - pmerrloc_writel(host->pmerrloc_base, 
ELCFG, val); - pmerrloc_writel(host->pmerrloc_base, ELEN, - sector_size * 8 + host->pmecc_degree * cap); - - end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); - while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) - & PMERRLOC_CALC_DONE)) { - if (unlikely(time_after(jiffies, end_time))) { - dev_err(host->dev, "PMECC: Timeout to calculate error location.\n"); - return -1; - } - cpu_relax(); - } - - roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) - & PMERRLOC_ERR_NUM_MASK) >> 8; - /* Number of roots == degree of smu hence <= cap */ - if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1) - return err_nbr - 1; - - /* Number of roots does not match the degree of smu - * unable to correct error */ - return -1; -} - -static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, - int sector_num, int extra_bytes, int err_nbr) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - int i = 0; - int byte_pos, bit_pos, sector_size, pos; - uint32_t tmp; - uint8_t err_byte; - - sector_size = host->pmecc_sector_size; - - while (err_nbr) { - tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1; - byte_pos = tmp / 8; - bit_pos = tmp % 8; - - if (byte_pos >= (sector_size + extra_bytes)) - BUG(); /* should never happen */ - - if (byte_pos < sector_size) { - err_byte = *(buf + byte_pos); - *(buf + byte_pos) ^= (1 << bit_pos); - - pos = sector_num * host->pmecc_sector_size + byte_pos; - dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", - pos, bit_pos, err_byte, *(buf + byte_pos)); - } else { - struct mtd_oob_region oobregion; - - /* Bit flip in OOB area */ - tmp = sector_num * nand_chip->ecc.bytes - + (byte_pos - sector_size); - err_byte = ecc[tmp]; - ecc[tmp] ^= (1 << bit_pos); - - mtd_ooblayout_ecc(mtd, 0, &oobregion); - pos = tmp + oobregion.offset; - dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", - pos, bit_pos, err_byte, ecc[tmp]); - } - - i++; - err_nbr--; - } - - return; -} - -static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, - u8 *ecc) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - int i, err_nbr; - uint8_t *buf_pos; - int max_bitflips = 0; - - for (i = 0; i < nand_chip->ecc.steps; i++) { - err_nbr = 0; - if (pmecc_stat & 0x1) { - buf_pos = buf + i * host->pmecc_sector_size; - - pmecc_gen_syndrome(mtd, i); - pmecc_substitute(mtd); - pmecc_get_sigma(mtd); - - err_nbr = pmecc_err_location(mtd); - if (err_nbr >= 0) { - pmecc_correct_data(mtd, buf_pos, ecc, i, - nand_chip->ecc.bytes, - err_nbr); - } else if (!host->caps->pmecc_correct_erase_page) { - u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes); - - /* Try to detect erased pages */ - err_nbr = nand_check_erased_ecc_chunk(buf_pos, - host->pmecc_sector_size, - ecc_pos, - nand_chip->ecc.bytes, - NULL, 0, - nand_chip->ecc.strength); - } - - if (err_nbr < 0) { - dev_err(host->dev, "PMECC: Too many errors\n"); - mtd->ecc_stats.failed++; - return -EIO; - } - - mtd->ecc_stats.corrected += err_nbr; - max_bitflips = max_t(int, max_bitflips, err_nbr); - } - pmecc_stat >>= 1; - } - - return max_bitflips; -} - -static void pmecc_enable(struct atmel_nand_host *host, int ecc_op) -{ - u32 val; - - if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) { - dev_err(host->dev, "atmel_nand: wrong pmecc operation type!"); - return; - } - - 
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); - val = pmecc_readl_relaxed(host->ecc, CFG); - - if (ecc_op == NAND_ECC_READ) - pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP) - | PMECC_CFG_AUTO_ENABLE); - else - pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP) - & ~PMECC_CFG_AUTO_ENABLE); - - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA); -} - -static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf, int oob_required, int page) -{ - struct atmel_nand_host *host = nand_get_controller_data(chip); - int eccsize = chip->ecc.size * chip->ecc.steps; - uint8_t *oob = chip->oob_poi; - uint32_t stat; - unsigned long end_time; - int bitflips = 0; - - if (!host->nfc || !host->nfc->use_nfc_sram) - pmecc_enable(host, NAND_ECC_READ); - - chip->read_buf(mtd, buf, eccsize); - chip->read_buf(mtd, oob, mtd->oobsize); - - end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); - while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { - if (unlikely(time_after(jiffies, end_time))) { - dev_err(host->dev, "PMECC: Timeout to get error status.\n"); - return -EIO; - } - cpu_relax(); - } - - stat = pmecc_readl_relaxed(host->ecc, ISR); - if (stat != 0) { - struct mtd_oob_region oobregion; - - mtd_ooblayout_ecc(mtd, 0, &oobregion); - bitflips = pmecc_correction(mtd, stat, buf, - &oob[oobregion.offset]); - if (bitflips < 0) - /* uncorrectable errors */ - return 0; - } - - return bitflips; -} - -static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf, int oob_required, - int page) -{ - struct atmel_nand_host *host = nand_get_controller_data(chip); - struct mtd_oob_region oobregion = { }; - int i, j, section = 0; - unsigned long end_time; - - if (!host->nfc || !host->nfc->write_by_sram) { - pmecc_enable(host, NAND_ECC_WRITE); - chip->write_buf(mtd, (u8 *)buf, mtd->writesize); - } - - end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); - while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { - if (unlikely(time_after(jiffies, end_time))) { - dev_err(host->dev, "PMECC: Timeout to get ECC value.\n"); - return -EIO; - } - cpu_relax(); - } - - for (i = 0; i < chip->ecc.steps; i++) { - for (j = 0; j < chip->ecc.bytes; j++) { - if (!oobregion.length) - mtd_ooblayout_ecc(mtd, section, &oobregion); - - chip->oob_poi[oobregion.offset] = - pmecc_readb_ecc_relaxed(host->ecc, i, j); - oobregion.length--; - oobregion.offset++; - section++; - } - } - chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); - - return 0; -} - -static void atmel_pmecc_core_init(struct mtd_info *mtd) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - int eccbytes = mtd_ooblayout_count_eccbytes(mtd); - uint32_t val = 0; - struct mtd_oob_region oobregion; - - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); - - switch (host->pmecc_corr_cap) { - case 2: - val = PMECC_CFG_BCH_ERR2; - break; - case 4: - val = PMECC_CFG_BCH_ERR4; - break; - case 8: - val = PMECC_CFG_BCH_ERR8; - break; - case 12: - val = PMECC_CFG_BCH_ERR12; - break; - case 24: - val = PMECC_CFG_BCH_ERR24; - break; - case 32: - val = PMECC_CFG_BCH_ERR32; - break; - } - - if (host->pmecc_sector_size == 512) - val |= PMECC_CFG_SECTOR512; - else if (host->pmecc_sector_size == 1024) - val |= PMECC_CFG_SECTOR1024; - - switch 
(nand_chip->ecc.steps) { - case 1: - val |= PMECC_CFG_PAGE_1SECTOR; - break; - case 2: - val |= PMECC_CFG_PAGE_2SECTORS; - break; - case 4: - val |= PMECC_CFG_PAGE_4SECTORS; - break; - case 8: - val |= PMECC_CFG_PAGE_8SECTORS; - break; - } - - val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE - | PMECC_CFG_AUTO_DISABLE); - pmecc_writel(host->ecc, CFG, val); - - pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1); - mtd_ooblayout_ecc(mtd, 0, &oobregion); - pmecc_writel(host->ecc, SADDR, oobregion.offset); - pmecc_writel(host->ecc, EADDR, - oobregion.offset + eccbytes - 1); - /* See datasheet about PMECC Clock Control Register */ - pmecc_writel(host->ecc, CLK, 2); - pmecc_writel(host->ecc, IDR, 0xff); - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); -} - -/* - * Get minimum ecc requirements from NAND. - * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function - * will set them according to minimum ecc requirement. Otherwise, use the - * value in DTS file. - * return 0 if success. otherwise return error code. - */ -static int pmecc_choose_ecc(struct atmel_nand_host *host, - int *cap, int *sector_size) -{ - /* Get minimum ECC requirements */ - if (host->nand_chip.ecc_strength_ds) { - *cap = host->nand_chip.ecc_strength_ds; - *sector_size = host->nand_chip.ecc_step_ds; - dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n", - *cap, *sector_size); - } else { - *cap = 2; - *sector_size = 512; - dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n"); - } - - /* If device tree doesn't specify, use NAND's minimum ECC parameters */ - if (host->pmecc_corr_cap == 0) { - if (*cap > host->caps->pmecc_max_correction) - return -EINVAL; - - /* use the most fitable ecc bits (the near bigger one ) */ - if (*cap <= 2) - host->pmecc_corr_cap = 2; - else if (*cap <= 4) - host->pmecc_corr_cap = 4; - else if (*cap <= 8) - host->pmecc_corr_cap = 8; - else if (*cap <= 12) - host->pmecc_corr_cap = 12; - else if (*cap <= 24) - host->pmecc_corr_cap = 24; - else if (*cap <= 32) - host->pmecc_corr_cap = 32; - else - return -EINVAL; - } - if (host->pmecc_sector_size == 0) { - /* use the most fitable sector size (the near smaller one ) */ - if (*sector_size >= 1024) - host->pmecc_sector_size = 1024; - else if (*sector_size >= 512) - host->pmecc_sector_size = 512; - else - return -EINVAL; - } - return 0; -} - -static inline int deg(unsigned int poly) -{ - /* polynomial degree is the most-significant bit index */ - return fls(poly) - 1; -} - -static int build_gf_tables(int mm, unsigned int poly, - int16_t *index_of, int16_t *alpha_to) -{ - unsigned int i, x = 1; - const unsigned int k = 1 << deg(poly); - unsigned int nn = (1 << mm) - 1; - - /* primitive polynomial must be of degree m */ - if (k != (1u << mm)) - return -EINVAL; - - for (i = 0; i < nn; i++) { - alpha_to[i] = x; - index_of[x] = i; - if (i && (x == 1)) - /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ - return -EINVAL; - x <<= 1; - if (x & k) - x ^= poly; - } - alpha_to[nn] = 1; - index_of[0] = 0; - - return 0; -} - -static uint16_t *create_lookup_table(struct device *dev, int sector_size) -{ - int degree = (sector_size == 512) ? - PMECC_GF_DIMENSION_13 : - PMECC_GF_DIMENSION_14; - unsigned int poly = (sector_size == 512) ? - PMECC_GF_13_PRIMITIVE_POLY : - PMECC_GF_14_PRIMITIVE_POLY; - int table_size = (sector_size == 512) ? 
- PMECC_LOOKUP_TABLE_SIZE_512 : - PMECC_LOOKUP_TABLE_SIZE_1024; - - int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t), - GFP_KERNEL); - if (addr && build_gf_tables(degree, poly, addr, addr + table_size)) - return NULL; - - return addr; -} - -static int atmel_pmecc_nand_init_params(struct platform_device *pdev, - struct atmel_nand_host *host) -{ - struct nand_chip *nand_chip = &host->nand_chip; - struct mtd_info *mtd = nand_to_mtd(nand_chip); - struct resource *regs, *regs_pmerr, *regs_rom; - uint16_t *galois_table; - int cap, sector_size, err_no; - - err_no = pmecc_choose_ecc(host, &cap, &sector_size); - if (err_no) { - dev_err(host->dev, "The NAND flash's ECC requirement are not support!"); - return err_no; - } - - if (cap > host->pmecc_corr_cap || - sector_size != host->pmecc_sector_size) - dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n"); - - cap = host->pmecc_corr_cap; - sector_size = host->pmecc_sector_size; - host->pmecc_lookup_table_offset = (sector_size == 512) ? - host->pmecc_lookup_table_offset_512 : - host->pmecc_lookup_table_offset_1024; - - dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n", - cap, sector_size); - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!regs) { - dev_warn(host->dev, - "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n"); - nand_chip->ecc.mode = NAND_ECC_SOFT; - nand_chip->ecc.algo = NAND_ECC_HAMMING; - return 0; - } - - host->ecc = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(host->ecc)) { - err_no = PTR_ERR(host->ecc); - goto err; - } - - regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2); - host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr); - if (IS_ERR(host->pmerrloc_base)) { - err_no = PTR_ERR(host->pmerrloc_base); - goto err; - } - host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx + - (host->caps->pmecc_max_correction + 1) * 4; - - if (!host->has_no_lookup_table) { - regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3); - host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, - regs_rom); - if (IS_ERR(host->pmecc_rom_base)) { - dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n"); - host->has_no_lookup_table = true; - } - } - - if (host->has_no_lookup_table) { - /* Build the look-up table in runtime */ - galois_table = create_lookup_table(host->dev, sector_size); - if (!galois_table) { - dev_err(host->dev, "Failed to build a lookup table in runtime!\n"); - err_no = -EINVAL; - goto err; - } - - host->pmecc_rom_base = (void __iomem *)galois_table; - host->pmecc_lookup_table_offset = 0; - } - - nand_chip->ecc.size = sector_size; - - /* set ECC page size and oob layout */ - switch (mtd->writesize) { - case 512: - case 1024: - case 2048: - case 4096: - case 8192: - if (sector_size > mtd->writesize) { - dev_err(host->dev, "pmecc sector size is bigger than the page size!\n"); - err_no = -EINVAL; - goto err; - } - - host->pmecc_degree = (sector_size == 512) ? 
- PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14; - host->pmecc_cw_len = (1 << host->pmecc_degree) - 1; - host->pmecc_alpha_to = pmecc_get_alpha_to(host); - host->pmecc_index_of = host->pmecc_rom_base + - host->pmecc_lookup_table_offset; - - nand_chip->ecc.strength = cap; - nand_chip->ecc.bytes = pmecc_get_ecc_bytes(cap, sector_size); - nand_chip->ecc.steps = mtd->writesize / sector_size; - nand_chip->ecc.total = nand_chip->ecc.bytes * - nand_chip->ecc.steps; - if (nand_chip->ecc.total > - mtd->oobsize - PMECC_OOB_RESERVED_BYTES) { - dev_err(host->dev, "No room for ECC bytes\n"); - err_no = -EINVAL; - goto err; - } - - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - break; - default: - dev_warn(host->dev, - "Unsupported page size for PMECC, use Software ECC\n"); - /* page size not handled by HW ECC */ - /* switching back to soft ECC */ - nand_chip->ecc.mode = NAND_ECC_SOFT; - nand_chip->ecc.algo = NAND_ECC_HAMMING; - return 0; - } - - /* Allocate data for PMECC computation */ - err_no = pmecc_data_alloc(host); - if (err_no) { - dev_err(host->dev, - "Cannot allocate memory for PMECC computation!\n"); - goto err; - } - - nand_chip->options |= NAND_NO_SUBPAGE_WRITE; - nand_chip->ecc.read_page = atmel_nand_pmecc_read_page; - nand_chip->ecc.write_page = atmel_nand_pmecc_write_page; - - atmel_pmecc_core_init(mtd); - - return 0; - -err: - return err_no; -} - -/* - * Calculate HW ECC - * - * function called after a write - * - * mtd: MTD block structure - * dat: raw data (unused) - * ecc_code: buffer for ECC - */ -static int atmel_nand_calculate(struct mtd_info *mtd, - const u_char *dat, unsigned char *ecc_code) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - unsigned int ecc_value; - - /* get the first 2 ECC bytes */ - ecc_value = ecc_readl(host->ecc, PR); - - ecc_code[0] = ecc_value & 0xFF; - ecc_code[1] = (ecc_value >> 8) & 0xFF; - - /* get the last 2 ECC bytes */ - ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY; - - ecc_code[2] = ecc_value & 0xFF; - ecc_code[3] = (ecc_value >> 8) & 0xFF; - - return 0; -} - -/* - * HW ECC read page function - * - * mtd: mtd info structure - * chip: nand chip info structure - * buf: buffer to store read data - * oob_required: caller expects OOB data read to chip->oob_poi - */ -static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) -{ - int eccsize = chip->ecc.size; - int eccbytes = chip->ecc.bytes; - uint8_t *p = buf; - uint8_t *oob = chip->oob_poi; - uint8_t *ecc_pos; - int stat; - unsigned int max_bitflips = 0; - struct mtd_oob_region oobregion = {}; - - /* - * Errata: ALE is incorrectly wired up to the ECC controller - * on the AP7000, so it will include the address cycles in the - * ECC calculation. - * - * Workaround: Reset the parity registers before reading the - * actual data. - */ - struct atmel_nand_host *host = nand_get_controller_data(chip); - if (host->board.need_reset_workaround) - ecc_writel(host->ecc, CR, ATMEL_ECC_RST); - - /* read the page */ - chip->read_buf(mtd, p, eccsize); - - /* move to ECC position if needed */ - mtd_ooblayout_ecc(mtd, 0, &oobregion); - if (oobregion.offset != 0) { - /* - * This only works on large pages because the ECC controller - * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT. - * Anyway, for small pages, the first ECC byte is at offset - * 0 in the OOB area. 
- */ - chip->cmdfunc(mtd, NAND_CMD_RNDOUT, - mtd->writesize + oobregion.offset, -1); - } - - /* the ECC controller needs to read the ECC just after the data */ - ecc_pos = oob + oobregion.offset; - chip->read_buf(mtd, ecc_pos, eccbytes); - - /* check if there's an error */ - stat = chip->ecc.correct(mtd, p, oob, NULL); - - if (stat < 0) { - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += stat; - max_bitflips = max_t(unsigned int, max_bitflips, stat); - } - - /* get back to oob start (end of page) */ - chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); - - /* read the oob */ - chip->read_buf(mtd, oob, mtd->oobsize); - - return max_bitflips; -} - -/* - * HW ECC Correction - * - * function called after a read - * - * mtd: MTD block structure - * dat: raw data read from the chip - * read_ecc: ECC from the chip (unused) - * isnull: unused - * - * Detect and correct a 1 bit error for a page - */ -static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat, - u_char *read_ecc, u_char *isnull) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - unsigned int ecc_status; - unsigned int ecc_word, ecc_bit; - - /* get the status from the Status Register */ - ecc_status = ecc_readl(host->ecc, SR); - - /* if there's no error */ - if (likely(!(ecc_status & ATMEL_ECC_RECERR))) - return 0; - - /* get error bit offset (4 bits) */ - ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR; - /* get word address (12 bits) */ - ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR; - ecc_word >>= 4; - - /* if there are multiple errors */ - if (ecc_status & ATMEL_ECC_MULERR) { - /* check if it is a freshly erased block - * (filled with 0xff) */ - if ((ecc_bit == ATMEL_ECC_BITADDR) - && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) { - /* the block has just been erased, return OK */ - return 0; - } - /* it doesn't seems to be a freshly - * erased block. - * We can't correct so many errors */ - dev_dbg(host->dev, "atmel_nand : multiple errors detected." - " Unable to correct.\n"); - return -EBADMSG; - } - - /* if there's a single bit error : we can correct it */ - if (ecc_status & ATMEL_ECC_ECCERR) { - /* there's nothing much to do here. - * the bit error is on the ECC itself. - */ - dev_dbg(host->dev, "atmel_nand : one bit error on ECC code." - " Nothing to correct\n"); - return 0; - } - - dev_dbg(host->dev, "atmel_nand : one bit error on data." - " (word offset in the page :" - " 0x%x bit offset : 0x%x)\n", - ecc_word, ecc_bit); - /* correct the error */ - if (nand_chip->options & NAND_BUSWIDTH_16) { - /* 16 bits words */ - ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit); - } else { - /* 8 bits words */ - dat[ecc_word] ^= (1 << ecc_bit); - } - dev_dbg(host->dev, "atmel_nand : error corrected\n"); - return 1; -} - -/* - * Enable HW ECC : unused on most chips - */ -static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - if (host->board.need_reset_workaround) - ecc_writel(host->ecc, CR, ATMEL_ECC_RST); -} - -static int atmel_of_init_ecc(struct atmel_nand_host *host, - struct device_node *np) -{ - u32 offset[2]; - u32 val; - - host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc"); - - /* Not using PMECC */ - if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc) - return 0; - - /* use PMECC, get correction capability, sector size and lookup - * table offset. 
- * If correction bits and sector size are not specified, then find - * them from NAND ONFI parameters. - */ - if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) { - if (val > host->caps->pmecc_max_correction) { - dev_err(host->dev, - "Required ECC strength too high: %u max %u\n", - val, host->caps->pmecc_max_correction); - return -EINVAL; - } - if ((val != 2) && (val != 4) && (val != 8) && - (val != 12) && (val != 24) && (val != 32)) { - dev_err(host->dev, - "Required ECC strength not supported: %u\n", - val); - return -EINVAL; - } - host->pmecc_corr_cap = (u8)val; - } - - if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) { - if ((val != 512) && (val != 1024)) { - dev_err(host->dev, - "Required ECC sector size not supported: %u\n", - val); - return -EINVAL; - } - host->pmecc_sector_size = (u16)val; - } - - if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset", - offset, 2) != 0) { - dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n"); - host->has_no_lookup_table = true; - /* Will build a lookup table and initialize the offset later */ - return 0; - } - - if (!offset[0] && !offset[1]) { - dev_err(host->dev, "Invalid PMECC lookup table offset\n"); - return -EINVAL; - } - - host->pmecc_lookup_table_offset_512 = offset[0]; - host->pmecc_lookup_table_offset_1024 = offset[1]; - - return 0; -} - -static int atmel_of_init_port(struct atmel_nand_host *host, - struct device_node *np) -{ - u32 val; - struct atmel_nand_data *board = &host->board; - enum of_gpio_flags flags = 0; - - host->caps = (struct atmel_nand_caps *) - of_device_get_match_data(host->dev); - - if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { - if (val >= 32) { - dev_err(host->dev, "invalid addr-offset %u\n", val); - return -EINVAL; - } - board->ale = val; - } - - if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) { - if (val >= 32) { - dev_err(host->dev, "invalid cmd-offset %u\n", val); - return -EINVAL; - } - board->cle = val; - } - - board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma"); - - board->rdy_pin = of_get_gpio_flags(np, 0, &flags); - board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW); - - board->enable_pin = of_get_gpio(np, 1); - board->det_pin = of_get_gpio(np, 2); - - /* load the nfc driver if there is */ - of_platform_populate(np, NULL, NULL, host->dev); - - /* - * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value - * even if the nand-ecc-mode property is not defined. 
- */ - host->nand_chip.ecc.mode = NAND_ECC_SOFT; - host->nand_chip.ecc.algo = NAND_ECC_HAMMING; - - return 0; -} - -static int atmel_hw_nand_init_params(struct platform_device *pdev, - struct atmel_nand_host *host) -{ - struct nand_chip *nand_chip = &host->nand_chip; - struct mtd_info *mtd = nand_to_mtd(nand_chip); - struct resource *regs; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!regs) { - dev_err(host->dev, - "Can't get I/O resource regs, use software ECC\n"); - nand_chip->ecc.mode = NAND_ECC_SOFT; - nand_chip->ecc.algo = NAND_ECC_HAMMING; - return 0; - } - - host->ecc = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(host->ecc)) - return PTR_ERR(host->ecc); - - /* ECC is calculated for the whole page (1 step) */ - nand_chip->ecc.size = mtd->writesize; - - /* set ECC page size and oob layout */ - switch (mtd->writesize) { - case 512: - mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops); - ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); - break; - case 1024: - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); - break; - case 2048: - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); - break; - case 4096: - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); - break; - default: - /* page size not handled by HW ECC */ - /* switching back to soft ECC */ - nand_chip->ecc.mode = NAND_ECC_SOFT; - nand_chip->ecc.algo = NAND_ECC_HAMMING; - return 0; - } - - /* set up for HW ECC */ - nand_chip->ecc.calculate = atmel_nand_calculate; - nand_chip->ecc.correct = atmel_nand_correct; - nand_chip->ecc.hwctl = atmel_nand_hwctl; - nand_chip->ecc.read_page = atmel_nand_read_page; - nand_chip->ecc.bytes = 4; - nand_chip->ecc.strength = 1; - - return 0; -} - -static inline u32 nfc_read_status(struct atmel_nand_host *host) -{ - u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE; - u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR); - - if (unlikely(nfc_status & err_flags)) { - if (nfc_status & NFC_SR_DTOE) - dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n"); - else if (nfc_status & NFC_SR_UNDEF) - dev_err(host->dev, "NFC: Access Undefined Area Error\n"); - else if (nfc_status & NFC_SR_AWB) - dev_err(host->dev, "NFC: Access memory While NFC is busy\n"); - else if (nfc_status & NFC_SR_ASE) - dev_err(host->dev, "NFC: Access memory Size Error\n"); - } - - return nfc_status; -} - -/* SMC interrupt service routine */ -static irqreturn_t hsmc_interrupt(int irq, void *dev_id) -{ - struct atmel_nand_host *host = dev_id; - u32 status, mask, pending; - irqreturn_t ret = IRQ_NONE; - - status = nfc_read_status(host); - mask = nfc_readl(host->nfc->hsmc_regs, IMR); - pending = status & mask; - - if (pending & NFC_SR_XFR_DONE) { - complete(&host->nfc->comp_xfer_done); - nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); - ret = IRQ_HANDLED; - } - if (pending & NFC_SR_RB_EDGE) { - complete(&host->nfc->comp_ready); - nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); - ret = IRQ_HANDLED; - } - if (pending & NFC_SR_CMD_DONE) { - complete(&host->nfc->comp_cmd_done); - nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE); - ret = IRQ_HANDLED; - } - - return ret; -} - -/* NFC(Nand Flash Controller) related functions */ -static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) -{ - if (flag & NFC_SR_XFR_DONE) - init_completion(&host->nfc->comp_xfer_done); - - if (flag & NFC_SR_RB_EDGE) - 
init_completion(&host->nfc->comp_ready); - - if (flag & NFC_SR_CMD_DONE) - init_completion(&host->nfc->comp_cmd_done); - - /* Enable interrupt that need to wait for */ - nfc_writel(host->nfc->hsmc_regs, IER, flag); -} - -static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) -{ - int i, index = 0; - struct completion *comp[3]; /* Support 3 interrupt completion */ - - if (flag & NFC_SR_XFR_DONE) - comp[index++] = &host->nfc->comp_xfer_done; - - if (flag & NFC_SR_RB_EDGE) - comp[index++] = &host->nfc->comp_ready; - - if (flag & NFC_SR_CMD_DONE) - comp[index++] = &host->nfc->comp_cmd_done; - - if (index == 0) { - dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag); - return -EINVAL; - } - - for (i = 0; i < index; i++) { - if (wait_for_completion_timeout(comp[i], - msecs_to_jiffies(NFC_TIME_OUT_MS))) - continue; /* wait for next completion */ - else - goto err_timeout; - } - - return 0; - -err_timeout: - dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag); - /* Disable the interrupt as it is not handled by interrupt handler */ - nfc_writel(host->nfc->hsmc_regs, IDR, flag); - return -ETIMEDOUT; -} - -static int nfc_send_command(struct atmel_nand_host *host, - unsigned int cmd, unsigned int addr, unsigned char cycle0) -{ - unsigned long timeout; - u32 flag = NFC_SR_CMD_DONE; - flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0; - - dev_dbg(host->dev, - "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n", - cmd, addr, cycle0); - - timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS); - while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) { - if (time_after(jiffies, timeout)) { - dev_err(host->dev, - "Time out to wait for NFC ready!\n"); - return -ETIMEDOUT; - } - } - - nfc_prepare_interrupt(host, flag); - nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0); - nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs); - return nfc_wait_interrupt(host, flag); -} - -static int nfc_device_ready(struct mtd_info *mtd) -{ - u32 status, mask; - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - status = nfc_read_status(host); - mask = nfc_readl(host->nfc->hsmc_regs, IMR); - - /* The mask should be 0. 
If not we may lost interrupts */ - if (unlikely(mask & status)) - dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", - mask & status); - - return status & NFC_SR_RB_EDGE; -} - -static void nfc_select_chip(struct mtd_info *mtd, int chip) -{ - struct nand_chip *nand_chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(nand_chip); - - if (chip == -1) - nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE); - else - nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE); -} - -static int nfc_make_addr(struct mtd_info *mtd, int command, int column, - int page_addr, unsigned int *addr1234, unsigned int *cycle0) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - - int acycle = 0; - unsigned char addr_bytes[8]; - int index = 0, bit_shift; - - BUG_ON(addr1234 == NULL || cycle0 == NULL); - - *cycle0 = 0; - *addr1234 = 0; - - if (column != -1) { - if (chip->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - addr_bytes[acycle++] = column & 0xff; - if (mtd->writesize > 512) - addr_bytes[acycle++] = (column >> 8) & 0xff; - } - - if (page_addr != -1) { - addr_bytes[acycle++] = page_addr & 0xff; - addr_bytes[acycle++] = (page_addr >> 8) & 0xff; - if (chip->chipsize > (128 << 20)) - addr_bytes[acycle++] = (page_addr >> 16) & 0xff; - } - - if (acycle > 4) - *cycle0 = addr_bytes[index++]; - - for (bit_shift = 0; index < acycle; bit_shift += 8) - *addr1234 += addr_bytes[index++] << bit_shift; - - /* return acycle in cmd register */ - return acycle << NFCADDR_CMD_ACYCLE_BIT_POS; -} - -static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, - int column, int page_addr) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); - unsigned long timeout; - unsigned int nfc_addr_cmd = 0; - - unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS; - - /* Set default settings: no cmd2, no addr cycle. 
read from nand */ - unsigned int cmd2 = 0; - unsigned int vcmd2 = 0; - int acycle = NFCADDR_CMD_ACYCLE_NONE; - int csid = NFCADDR_CMD_CSID_3; - int dataen = NFCADDR_CMD_DATADIS; - int nfcwr = NFCADDR_CMD_NFCRD; - unsigned int addr1234 = 0; - unsigned int cycle0 = 0; - bool do_addr = true; - host->nfc->data_in_sram = NULL; - - dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n", - __func__, command, column, page_addr); - - switch (command) { - case NAND_CMD_RESET: - nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr; - nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); - udelay(chip->chip_delay); - - nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1); - timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS); - while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) { - if (time_after(jiffies, timeout)) { - dev_err(host->dev, - "Time out to wait status ready!\n"); - break; - } - } - return; - case NAND_CMD_STATUS: - do_addr = false; - break; - case NAND_CMD_PARAM: - case NAND_CMD_READID: - do_addr = false; - acycle = NFCADDR_CMD_ACYCLE_1; - if (column != -1) - addr1234 = column; - break; - case NAND_CMD_RNDOUT: - cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS; - vcmd2 = NFCADDR_CMD_VCMD2; - break; - case NAND_CMD_READ0: - case NAND_CMD_READOOB: - if (command == NAND_CMD_READOOB) { - column += mtd->writesize; - command = NAND_CMD_READ0; /* only READ0 is valid */ - cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS; - } - if (host->nfc->use_nfc_sram) { - /* Enable Data transfer to sram */ - dataen = NFCADDR_CMD_DATAEN; - - /* Need enable PMECC now, since NFC will transfer - * data in bus after sending nfc read command. - */ - if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) - pmecc_enable(host, NAND_ECC_READ); - } - - cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS; - vcmd2 = NFCADDR_CMD_VCMD2; - break; - /* For prgramming command, the cmd need set to write enable */ - case NAND_CMD_PAGEPROG: - case NAND_CMD_SEQIN: - case NAND_CMD_RNDIN: - nfcwr = NFCADDR_CMD_NFCWR; - if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN) - dataen = NFCADDR_CMD_DATAEN; - break; - default: - break; - } - - if (do_addr) - acycle = nfc_make_addr(mtd, command, column, page_addr, - &addr1234, &cycle0); - - nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr; - nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); - - /* - * Program and erase have their own busy handlers status, sequential - * in, and deplete1 need no delay. 
- */ - switch (command) { - case NAND_CMD_CACHEDPROG: - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_RNDIN: - case NAND_CMD_STATUS: - case NAND_CMD_RNDOUT: - case NAND_CMD_SEQIN: - case NAND_CMD_READID: - return; - - case NAND_CMD_READ0: - if (dataen == NFCADDR_CMD_DATAEN) { - host->nfc->data_in_sram = host->nfc->sram_bank0 + - nfc_get_sram_off(host); - return; - } - /* fall through */ - default: - nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); - nfc_wait_interrupt(host, NFC_SR_RB_EDGE); - } -} - -static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip, - uint32_t offset, int data_len, const uint8_t *buf, - int oob_required, int page, int cached, int raw) -{ - int cfg, len; - int status = 0; - struct atmel_nand_host *host = nand_get_controller_data(chip); - void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host); - - /* Subpage write is not supported */ - if (offset || (data_len < mtd->writesize)) - return -EINVAL; - - len = mtd->writesize; - /* Copy page data to sram that will write to nand via NFC */ - if (use_dma) { - if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0) - /* Fall back to use cpu copy */ - memcpy(sram, buf, len); - } else { - memcpy(sram, buf, len); - } - - cfg = nfc_readl(host->nfc->hsmc_regs, CFG); - if (unlikely(raw) && oob_required) { - memcpy(sram + len, chip->oob_poi, mtd->oobsize); - len += mtd->oobsize; - nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE); - } else { - nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE); - } - - if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) - /* - * When use NFC sram, need set up PMECC before send - * NAND_CMD_SEQIN command. Since when the nand command - * is sent, nfc will do transfer from sram and nand. - */ - pmecc_enable(host, NAND_ECC_WRITE); - - host->nfc->will_write_sram = true; - chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); - host->nfc->will_write_sram = false; - - if (likely(!raw)) - /* Need to write ecc into oob */ - status = chip->ecc.write_page(mtd, chip, buf, oob_required, - page); - - if (status < 0) - return status; - - chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); - status = chip->waitfunc(mtd, chip); - - if ((status & NAND_STATUS_FAIL) && (chip->errstat)) - status = chip->errstat(mtd, chip, FL_WRITING, status, page); - - if (status & NAND_STATUS_FAIL) - return -EIO; - - return 0; -} - -static int nfc_sram_init(struct mtd_info *mtd) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); - int res = 0; - - /* Initialize the NFC CFG register */ - unsigned int cfg_nfc = 0; - - /* set page size and oob layout */ - switch (mtd->writesize) { - case 512: - cfg_nfc = NFC_CFG_PAGESIZE_512; - break; - case 1024: - cfg_nfc = NFC_CFG_PAGESIZE_1024; - break; - case 2048: - cfg_nfc = NFC_CFG_PAGESIZE_2048; - break; - case 4096: - cfg_nfc = NFC_CFG_PAGESIZE_4096; - break; - case 8192: - cfg_nfc = NFC_CFG_PAGESIZE_8192; - break; - default: - dev_err(host->dev, "Unsupported page size for NFC.\n"); - res = -ENXIO; - return res; - } - - /* oob bytes size = (NFCSPARESIZE + 1) * 4 - * Max support spare size is 512 bytes. 
*/ - cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS - & NFC_CFG_NFC_SPARESIZE); - /* default set a max timeout */ - cfg_nfc |= NFC_CFG_RSPARE | - NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL; - - nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc); - - host->nfc->will_write_sram = false; - nfc_set_sram_bank(host, 0); - - /* Use Write page with NFC SRAM only for PMECC or ECC NONE. */ - if (host->nfc->write_by_sram) { - if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) || - chip->ecc.mode == NAND_ECC_NONE) - chip->write_page = nfc_sram_write_page; - else - host->nfc->write_by_sram = false; - } - - dev_info(host->dev, "Using NFC Sram read %s\n", - host->nfc->write_by_sram ? "and write" : ""); - return 0; -} - -static struct platform_driver atmel_nand_nfc_driver; -/* - * Probe for the NAND device. - */ -static int atmel_nand_probe(struct platform_device *pdev) -{ - struct atmel_nand_host *host; - struct mtd_info *mtd; - struct nand_chip *nand_chip; - struct resource *mem; - int res, irq; - - /* Allocate memory for the device structure (and zero it) */ - host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); - if (!host) - return -ENOMEM; - - res = platform_driver_register(&atmel_nand_nfc_driver); - if (res) - dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n"); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->io_base = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(host->io_base)) { - res = PTR_ERR(host->io_base); - goto err_nand_ioremap; - } - host->io_phys = (dma_addr_t)mem->start; - - nand_chip = &host->nand_chip; - mtd = nand_to_mtd(nand_chip); - host->dev = &pdev->dev; - if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { - nand_set_flash_node(nand_chip, pdev->dev.of_node); - /* Only when CONFIG_OF is enabled of_node can be parsed */ - res = atmel_of_init_port(host, pdev->dev.of_node); - if (res) - goto err_nand_ioremap; - } else { - memcpy(&host->board, dev_get_platdata(&pdev->dev), - sizeof(struct atmel_nand_data)); - nand_chip->ecc.mode = host->board.ecc_mode; - - /* - * When using software ECC every supported avr32 board means - * Hamming algorithm. If that ever changes we'll need to add - * ecc_algo field to the struct atmel_nand_data. 
- */ - if (nand_chip->ecc.mode == NAND_ECC_SOFT) - nand_chip->ecc.algo = NAND_ECC_HAMMING; - - /* 16-bit bus width */ - if (host->board.bus_width_16) - nand_chip->options |= NAND_BUSWIDTH_16; - } - - /* link the private data structures */ - nand_set_controller_data(nand_chip, host); - mtd->dev.parent = &pdev->dev; - - /* Set address of NAND IO lines */ - nand_chip->IO_ADDR_R = host->io_base; - nand_chip->IO_ADDR_W = host->io_base; - - if (nand_nfc.is_initialized) { - /* NFC driver is probed and initialized */ - host->nfc = &nand_nfc; - - nand_chip->select_chip = nfc_select_chip; - nand_chip->dev_ready = nfc_device_ready; - nand_chip->cmdfunc = nfc_nand_command; - - /* Initialize the interrupt for NFC */ - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(host->dev, "Cannot get HSMC irq!\n"); - res = irq; - goto err_nand_ioremap; - } - - res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt, - 0, "hsmc", host); - if (res) { - dev_err(&pdev->dev, "Unable to request HSMC irq %d\n", - irq); - goto err_nand_ioremap; - } - } else { - res = atmel_nand_set_enable_ready_pins(mtd); - if (res) - goto err_nand_ioremap; - - nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; - } - - nand_chip->chip_delay = 40; /* 40us command delay time */ - - - nand_chip->read_buf = atmel_read_buf; - nand_chip->write_buf = atmel_write_buf; - - platform_set_drvdata(pdev, host); - atmel_nand_enable(host); - - if (gpio_is_valid(host->board.det_pin)) { - res = devm_gpio_request(&pdev->dev, - host->board.det_pin, "nand_det"); - if (res < 0) { - dev_err(&pdev->dev, - "can't request det gpio %d\n", - host->board.det_pin); - goto err_no_card; - } - - res = gpio_direction_input(host->board.det_pin); - if (res < 0) { - dev_err(&pdev->dev, - "can't request input direction det gpio %d\n", - host->board.det_pin); - goto err_no_card; - } - - if (gpio_get_value(host->board.det_pin)) { - dev_info(&pdev->dev, "No SmartMedia card inserted.\n"); - res = -ENXIO; - goto err_no_card; - } - } - - if (!host->board.has_dma) - use_dma = 0; - - if (use_dma) { - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_MEMCPY, mask); - host->dma_chan = dma_request_channel(mask, NULL, NULL); - if (!host->dma_chan) { - dev_err(host->dev, "Failed to request DMA channel\n"); - use_dma = 0; - } - } - if (use_dma) - dev_info(host->dev, "Using %s for DMA transfers.\n", - dma_chan_name(host->dma_chan)); - else - dev_info(host->dev, "No DMA support for NAND access.\n"); - - /* first scan to find the device and get the page size */ - res = nand_scan_ident(mtd, 1, NULL); - if (res) - goto err_scan_ident; - - if (host->board.on_flash_bbt || on_flash_bbt) - nand_chip->bbt_options |= NAND_BBT_USE_FLASH; - - if (nand_chip->bbt_options & NAND_BBT_USE_FLASH) - dev_info(&pdev->dev, "Use On Flash BBT\n"); - - if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { - res = atmel_of_init_ecc(host, pdev->dev.of_node); - if (res) - goto err_hw_ecc; - } - - if (nand_chip->ecc.mode == NAND_ECC_HW) { - if (host->has_pmecc) - res = atmel_pmecc_nand_init_params(pdev, host); - else - res = atmel_hw_nand_init_params(pdev, host); - - if (res != 0) - goto err_hw_ecc; - } - - /* initialize the nfc configuration register */ - if (host->nfc && host->nfc->use_nfc_sram) { - res = nfc_sram_init(mtd); - if (res) { - host->nfc->use_nfc_sram = false; - dev_err(host->dev, "Disable use nfc sram for data transfer.\n"); - } - } - - /* second phase scan */ - res = nand_scan_tail(mtd); - if (res) - goto err_scan_tail; - - mtd->name = "atmel_nand"; - res = mtd_device_register(mtd, 
host->board.parts, - host->board.num_parts); - if (!res) - return res; - -err_scan_tail: - if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); -err_hw_ecc: -err_scan_ident: -err_no_card: - atmel_nand_disable(host); - if (host->dma_chan) - dma_release_channel(host->dma_chan); -err_nand_ioremap: - return res; -} - -/* - * Remove a NAND device. - */ -static int atmel_nand_remove(struct platform_device *pdev) -{ - struct atmel_nand_host *host = platform_get_drvdata(pdev); - struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); - - nand_release(mtd); - - atmel_nand_disable(host); - - if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) { - pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); - pmerrloc_writel(host->pmerrloc_base, ELDIS, - PMERRLOC_DISABLE); - } - - if (host->dma_chan) - dma_release_channel(host->dma_chan); - - platform_driver_unregister(&atmel_nand_nfc_driver); - - return 0; -} - -/* - * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for - * BCH ECC. Combined with the "atmel,has-pmecc", it is used to describe - * devices from the SAM9 family that have those. - */ -static const struct atmel_nand_caps at91rm9200_caps = { - .pmecc_correct_erase_page = false, - .pmecc_max_correction = 24, -}; - -static const struct atmel_nand_caps sama5d4_caps = { - .pmecc_correct_erase_page = true, - .pmecc_max_correction = 24, -}; - -/* - * The PMECC Errloc controller starting in SAMA5D2 is not compatible, - * as the increased correction strength requires more registers. - */ -static const struct atmel_nand_caps sama5d2_caps = { - .pmecc_correct_erase_page = true, - .pmecc_max_correction = 32, -}; - -static const struct of_device_id atmel_nand_dt_ids[] = { - { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps }, - { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps }, - { .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps }, - { /* sentinel */ } -}; - -MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids); - -static int atmel_nand_nfc_probe(struct platform_device *pdev) -{ - struct atmel_nfc *nfc = &nand_nfc; - struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram; - int ret; - - nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs); - if (IS_ERR(nfc->base_cmd_regs)) - return PTR_ERR(nfc->base_cmd_regs); - - nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs); - if (IS_ERR(nfc->hsmc_regs)) - return PTR_ERR(nfc->hsmc_regs); - - nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (nfc_sram) { - nfc->sram_bank0 = (void * __force) - devm_ioremap_resource(&pdev->dev, nfc_sram); - if (IS_ERR(nfc->sram_bank0)) { - dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. 
So disable NFC sram.\n", - PTR_ERR(nfc->sram_bank0)); - } else { - nfc->use_nfc_sram = true; - nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start; - - if (pdev->dev.of_node) - nfc->write_by_sram = of_property_read_bool( - pdev->dev.of_node, - "atmel,write-by-sram"); - } - } - - nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); - nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ - - nfc->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(nfc->clk)) { - ret = clk_prepare_enable(nfc->clk); - if (ret) - return ret; - } else { - dev_warn(&pdev->dev, "NFC clock missing, update your Device Tree"); - } - - nfc->is_initialized = true; - dev_info(&pdev->dev, "NFC is probed.\n"); - - return 0; -} - -static int atmel_nand_nfc_remove(struct platform_device *pdev) -{ - struct atmel_nfc *nfc = &nand_nfc; - - if (!IS_ERR(nfc->clk)) - clk_disable_unprepare(nfc->clk); - - return 0; -} - -static const struct of_device_id atmel_nand_nfc_match[] = { - { .compatible = "atmel,sama5d3-nfc" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); - -static struct platform_driver atmel_nand_nfc_driver = { - .driver = { - .name = "atmel_nand_nfc", - .of_match_table = of_match_ptr(atmel_nand_nfc_match), - }, - .probe = atmel_nand_nfc_probe, - .remove = atmel_nand_nfc_remove, -}; - -static struct platform_driver atmel_nand_driver = { - .probe = atmel_nand_probe, - .remove = atmel_nand_remove, - .driver = { - .name = "atmel_nand", - .of_match_table = of_match_ptr(atmel_nand_dt_ids), - }, -}; - -module_platform_driver(atmel_nand_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Rick Bronson"); -MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32"); -MODULE_ALIAS("platform:atmel_nand"); diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h deleted file mode 100644 index 834d694487bd..000000000000 --- a/drivers/mtd/nand/atmel_nand_ecc.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Error Corrected Code Controller (ECC) - System peripherals regsters. - * Based on AT91SAM9260 datasheet revision B. - * - * Copyright (C) 2007 Andrew Victor - * Copyright (C) 2007 - 2012 Atmel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef ATMEL_NAND_ECC_H -#define ATMEL_NAND_ECC_H - -#define ATMEL_ECC_CR 0x00 /* Control register */ -#define ATMEL_ECC_RST (1 << 0) /* Reset parity */ - -#define ATMEL_ECC_MR 0x04 /* Mode register */ -#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */ -#define ATMEL_ECC_PAGESIZE_528 (0) -#define ATMEL_ECC_PAGESIZE_1056 (1) -#define ATMEL_ECC_PAGESIZE_2112 (2) -#define ATMEL_ECC_PAGESIZE_4224 (3) - -#define ATMEL_ECC_SR 0x08 /* Status register */ -#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */ -#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */ -#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */ - -#define ATMEL_ECC_PR 0x0c /* Parity register */ -#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */ -#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */ - -#define ATMEL_ECC_NPR 0x10 /* NParity register */ -#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */ - -/* PMECC Register Definitions */ -#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */ -#define PMECC_CFG_BCH_ERR2 (0 << 0) -#define PMECC_CFG_BCH_ERR4 (1 << 0) -#define PMECC_CFG_BCH_ERR8 (2 << 0) -#define PMECC_CFG_BCH_ERR12 (3 << 0) -#define PMECC_CFG_BCH_ERR24 (4 << 0) -#define PMECC_CFG_BCH_ERR32 (5 << 0) - -#define PMECC_CFG_SECTOR512 (0 << 4) -#define PMECC_CFG_SECTOR1024 (1 << 4) - -#define PMECC_CFG_PAGE_1SECTOR (0 << 8) -#define PMECC_CFG_PAGE_2SECTORS (1 << 8) -#define PMECC_CFG_PAGE_4SECTORS (2 << 8) -#define PMECC_CFG_PAGE_8SECTORS (3 << 8) - -#define PMECC_CFG_READ_OP (0 << 12) -#define PMECC_CFG_WRITE_OP (1 << 12) - -#define PMECC_CFG_SPARE_ENABLE (1 << 16) -#define PMECC_CFG_SPARE_DISABLE (0 << 16) - -#define PMECC_CFG_AUTO_ENABLE (1 << 20) -#define PMECC_CFG_AUTO_DISABLE (0 << 20) - -#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */ -#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */ -#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */ -#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */ -#define PMECC_CLK_133MHZ (2 << 0) - -#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */ -#define PMECC_CTRL_RST (1 << 0) -#define PMECC_CTRL_DATA (1 << 1) -#define PMECC_CTRL_USER (1 << 2) -#define PMECC_CTRL_ENABLE (1 << 4) -#define PMECC_CTRL_DISABLE (1 << 5) - -#define ATMEL_PMECC_SR 0x018 /* PMECC status register */ -#define PMECC_SR_BUSY (1 << 0) -#define PMECC_SR_ENABLE (1 << 4) - -#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */ -#define PMECC_IER_ENABLE (1 << 0) -#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */ -#define PMECC_IER_DISABLE (1 << 0) -#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */ -#define PMECC_IER_MASK (1 << 0) -#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */ -#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */ -#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */ - -/* PMERRLOC Register Definitions */ -#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */ -#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0) -#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0) -#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16) - -#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */ -#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */ -#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */ -#define PMERRLOC_DISABLE (1 << 0) - -#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */ -#define PMERRLOC_ELSR_BUSY (1 << 0) -#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */ -#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */ -#define 
ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */ -#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */ -#define PMERRLOC_ERR_NUM_MASK (0x1f << 8) -#define PMERRLOC_CALC_DONE (1 << 0) -#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */ - -/* - * The ATMEL_PMERRLOC_ELx register location depends from the number of - * bits corrected by the PMECC controller. Do not use it. - */ - -/* Register access macros for PMECC */ -#define pmecc_readl_relaxed(addr, reg) \ - readl_relaxed((addr) + ATMEL_PMECC_##reg) - -#define pmecc_writel(addr, reg, value) \ - writel((value), (addr) + ATMEL_PMECC_##reg) - -#define pmecc_readb_ecc_relaxed(addr, sector, n) \ - readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n)) - -#define pmecc_readl_rem_relaxed(addr, sector, n) \ - readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4)) - -#define pmerrloc_readl_relaxed(addr, reg) \ - readl_relaxed((addr) + ATMEL_PMERRLOC_##reg) - -#define pmerrloc_writel(addr, reg, value) \ - writel((value), (addr) + ATMEL_PMERRLOC_##reg) - -#define pmerrloc_writel_sigma_relaxed(addr, n, value) \ - writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) - -#define pmerrloc_readl_sigma_relaxed(addr, n) \ - readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) - -#define pmerrloc_readl_el_relaxed(addr, n) \ - readl_relaxed((addr) + ((n) * 4)) - -/* Galois field dimension */ -#define PMECC_GF_DIMENSION_13 13 -#define PMECC_GF_DIMENSION_14 14 - -/* Primitive Polynomial used by PMECC */ -#define PMECC_GF_13_PRIMITIVE_POLY 0x201b -#define PMECC_GF_14_PRIMITIVE_POLY 0x4443 - -#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000 -#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000 - -/* Time out value for reading PMECC status register */ -#define PMECC_MAX_TIMEOUT_MS 100 - -/* Reserved bytes in oob area */ -#define PMECC_OOB_RESERVED_BYTES 2 - -#endif diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h deleted file mode 100644 index 4d5d26221a7e..000000000000 --- a/drivers/mtd/nand/atmel_nand_nfc.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Atmel Nand Flash Controller (NFC) - System peripherals regsters. - * Based on SAMA5D3 datasheet. - * - * © Copyright 2013 Atmel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef ATMEL_NAND_NFC_H -#define ATMEL_NAND_NFC_H - -/* - * HSMC NFC registers - */ -#define ATMEL_HSMC_NFC_CFG 0x00 /* NFC Configuration Register */ -#define NFC_CFG_PAGESIZE (7 << 0) -#define NFC_CFG_PAGESIZE_512 (0 << 0) -#define NFC_CFG_PAGESIZE_1024 (1 << 0) -#define NFC_CFG_PAGESIZE_2048 (2 << 0) -#define NFC_CFG_PAGESIZE_4096 (3 << 0) -#define NFC_CFG_PAGESIZE_8192 (4 << 0) -#define NFC_CFG_WSPARE (1 << 8) -#define NFC_CFG_RSPARE (1 << 9) -#define NFC_CFG_NFC_DTOCYC (0xf << 16) -#define NFC_CFG_NFC_DTOMUL (0x7 << 20) -#define NFC_CFG_NFC_SPARESIZE (0x7f << 24) -#define NFC_CFG_NFC_SPARESIZE_BIT_POS 24 - -#define ATMEL_HSMC_NFC_CTRL 0x04 /* NFC Control Register */ -#define NFC_CTRL_ENABLE (1 << 0) -#define NFC_CTRL_DISABLE (1 << 1) - -#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */ -#define NFC_SR_BUSY (1 << 8) -#define NFC_SR_XFR_DONE (1 << 16) -#define NFC_SR_CMD_DONE (1 << 17) -#define NFC_SR_DTOE (1 << 20) -#define NFC_SR_UNDEF (1 << 21) -#define NFC_SR_AWB (1 << 22) -#define NFC_SR_ASE (1 << 23) -#define NFC_SR_RB_EDGE (1 << 24) - -#define ATMEL_HSMC_NFC_IER 0x0c -#define ATMEL_HSMC_NFC_IDR 0x10 -#define ATMEL_HSMC_NFC_IMR 0x14 -#define ATMEL_HSMC_NFC_CYCLE0 0x18 /* NFC Address Cycle Zero */ -#define ATMEL_HSMC_NFC_ADDR_CYCLE0 (0xff) - -#define ATMEL_HSMC_NFC_BANK 0x1c /* NFC Bank Register */ -#define ATMEL_HSMC_NFC_BANK0 (0 << 0) -#define ATMEL_HSMC_NFC_BANK1 (1 << 0) - -#define nfc_writel(addr, reg, value) \ - writel((value), (addr) + ATMEL_HSMC_NFC_##reg) - -#define nfc_readl(addr, reg) \ - readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg) - -/* - * NFC Address Command definitions - */ -#define NFCADDR_CMD_CMD1 (0xff << 2) /* Command for Cycle 1 */ -#define NFCADDR_CMD_CMD1_BIT_POS 2 -#define NFCADDR_CMD_CMD2 (0xff << 10) /* Command for Cycle 2 */ -#define NFCADDR_CMD_CMD2_BIT_POS 10 -#define NFCADDR_CMD_VCMD2 (0x1 << 18) /* Valid Cycle 2 Command */ -#define NFCADDR_CMD_ACYCLE (0x7 << 19) /* Number of Address required */ -#define NFCADDR_CMD_ACYCLE_NONE (0x0 << 19) -#define NFCADDR_CMD_ACYCLE_1 (0x1 << 19) -#define NFCADDR_CMD_ACYCLE_2 (0x2 << 19) -#define NFCADDR_CMD_ACYCLE_3 (0x3 << 19) -#define NFCADDR_CMD_ACYCLE_4 (0x4 << 19) -#define NFCADDR_CMD_ACYCLE_5 (0x5 << 19) -#define NFCADDR_CMD_ACYCLE_BIT_POS 19 -#define NFCADDR_CMD_CSID (0x7 << 22) /* Chip Select Identifier */ -#define NFCADDR_CMD_CSID_0 (0x0 << 22) -#define NFCADDR_CMD_CSID_1 (0x1 << 22) -#define NFCADDR_CMD_CSID_2 (0x2 << 22) -#define NFCADDR_CMD_CSID_3 (0x3 << 22) -#define NFCADDR_CMD_CSID_4 (0x4 << 22) -#define NFCADDR_CMD_CSID_5 (0x5 << 22) -#define NFCADDR_CMD_CSID_6 (0x6 << 22) -#define NFCADDR_CMD_CSID_7 (0x7 << 22) -#define NFCADDR_CMD_DATAEN (0x1 << 25) /* Data Transfer Enable */ -#define NFCADDR_CMD_DATADIS (0x0 << 25) /* Data Transfer Disable */ -#define NFCADDR_CMD_NFCRD (0x0 << 26) /* NFC Read Enable */ -#define NFCADDR_CMD_NFCWR (0x1 << 26) /* NFC Write Enable */ -#define NFCADDR_CMD_NFCBUSY (0x1 << 27) /* NFC Busy */ - -#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \ - writel((addr1234), (cmd) + nfc_base) - -#define nfc_cmd_readl(bitstatus, nfc_base) \ - readl_relaxed((bitstatus) + nfc_base) - -#define NFC_TIME_OUT_MS 100 -#define NFC_SRAM_BANK1_OFFSET 0x1200 - -#endif diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 42ebd73f821d..7419c5ce63f8 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -101,6 +101,9 @@ struct brcm_nand_dma_desc { #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) 
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) +#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) +#define NAND_POLL_STATUS_TIMEOUT_MS 100 + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -765,6 +768,31 @@ enum { CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), }; +static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, + u32 mask, u32 expected_val, + unsigned long timeout_ms) +{ + unsigned long limit; + u32 val; + + if (!timeout_ms) + timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS; + + limit = jiffies + msecs_to_jiffies(timeout_ms); + do { + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + + cpu_relax(); + } while (time_after(limit, jiffies)); + + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", + expected_val, val & mask); + + return -ETIMEDOUT; +} + static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) { u32 val = en ? CS_SELECT_NAND_WP : 0; @@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { static int old_wp = -1; + int ret; if (old_wp != wp) { dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); old_wp = wp; } + + /* + * make sure ctrl/flash ready before and after + * changing state of #WP pin + */ + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | + NAND_STATUS_READY, + NAND_CTRL_RDY | + NAND_STATUS_READY, 0); + if (ret) + return; + brcmnand_set_wp(ctrl, wp); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ + ret = bcmnand_ctrl_poll_status(ctrl, + NAND_CTRL_RDY | + NAND_STATUS_READY | + NAND_STATUS_WP, + NAND_CTRL_RDY | + NAND_STATUS_READY | + (wp ? 0 : NAND_STATUS_WP), 0); + + if (ret) + dev_err_ratelimited(&host->pdev->dev, + "nand #WP expected %s\n", + wp ? 
"on" : "off"); } } @@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data) static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) { struct brcmnand_controller *ctrl = host->ctrl; - u32 intfc; + int ret; dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; - intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); - WARN_ON(!(intfc & INTFC_CTLR_READY)); + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + WARN_ON(ret); mb(); /* flush previous writes */ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 226ac0bcafc6..949b9400dcb7 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -145,7 +145,7 @@ static int __init cmx270_init(void) ret = gpio_request(GPIO_NAND_CS, "NAND CS"); if (ret) { - pr_warning("CM-X270: failed to request NAND CS gpio\n"); + pr_warn("CM-X270: failed to request NAND CS gpio\n"); return ret; } @@ -153,7 +153,7 @@ static int __init cmx270_init(void) ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); if (ret) { - pr_warning("CM-X270: failed to request NAND R/B gpio\n"); + pr_warn("CM-X270: failed to request NAND R/B gpio\n"); goto err_gpio_request; } diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 27fa8b87cd5f..531c51991e57 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -581,6 +581,17 @@ static struct davinci_nand_pdata "ti,davinci-nand-use-bbt")) pdata->bbt_options = NAND_BBT_USE_FLASH; + /* + * Since kernel v4.8, this driver has been fixed to enable + * use of 4-bit hardware ECC with subpages and verified on + * TI's keystone EVMs (K2L, K2HK and K2E). + * However, in the interest of not breaking systems using + * existing UBI partitions, sub-page writes are not being + * (re)enabled. If you want to use subpage writes on Keystone + * platforms (i.e. do not have any existing UBI partitions), + * then use "ti,davinci-nand" as the compatible in your + * device-tree file. + */ if (of_device_is_compatible(pdev->dev.of_node, "ti,keystone-nand")) { pdata->options |= NAND_NO_SUBPAGE_WRITE; diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 73b9d4e2dca0..16634df2e39a 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -45,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, * We define a macro here that combines all interrupts this driver uses into * a single constant value, for convenience. */ -#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \ - INTR_STATUS__ECC_TRANSACTION_DONE | \ - INTR_STATUS__ECC_ERR | \ - INTR_STATUS__PROGRAM_FAIL | \ - INTR_STATUS__LOAD_COMP | \ - INTR_STATUS__PROGRAM_COMP | \ - INTR_STATUS__TIME_OUT | \ - INTR_STATUS__ERASE_FAIL | \ - INTR_STATUS__RST_COMP | \ - INTR_STATUS__ERASE_COMP) +#define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \ + INTR__ECC_TRANSACTION_DONE | \ + INTR__ECC_ERR | \ + INTR__PROGRAM_FAIL | \ + INTR__LOAD_COMP | \ + INTR__PROGRAM_COMP | \ + INTR__TIME_OUT | \ + INTR__ERASE_FAIL | \ + INTR__RST_COMP | \ + INTR__ERASE_COMP) /* * indicates whether or not the internal value for the flash bank is @@ -62,8 +62,6 @@ MODULE_PARM_DESC(onfi_timing_mode, */ #define CHIP_SELECT_INVALID -1 -#define SUPPORT_8BITECC 1 - /* * This macro divides two integers and rounds fractional values up * to the nearest integer value. 
@@ -86,16 +84,10 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) #define SPARE_ACCESS 0x41 #define MAIN_ACCESS 0x42 #define MAIN_SPARE_ACCESS 0x43 -#define PIPELINE_ACCESS 0x2000 #define DENALI_READ 0 #define DENALI_WRITE 0x100 -/* types of device accesses. We can issue commands and get status */ -#define COMMAND_CYCLE 0 -#define ADDR_CYCLE 1 -#define STATUS_CYCLE 2 - /* * this is a helper macro that allows us to * format the bank into the proper bits for the controller @@ -164,7 +156,7 @@ static void read_status(struct denali_nand_info *denali) static void reset_bank(struct denali_nand_info *denali) { uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT; + uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT; clear_interrupts(denali); @@ -172,7 +164,7 @@ static void reset_bank(struct denali_nand_info *denali) irq_status = wait_for_irq(denali, irq_mask); - if (irq_status & INTR_STATUS__TIME_OUT) + if (irq_status & INTR__TIME_OUT) dev_err(denali->dev, "reset bank failed.\n"); } @@ -182,22 +174,22 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali) int i; for (i = 0; i < denali->max_banks; i++) - iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, + iowrite32(INTR__RST_COMP | INTR__TIME_OUT, denali->flash_reg + INTR_STATUS(i)); for (i = 0; i < denali->max_banks; i++) { iowrite32(1 << i, denali->flash_reg + DEVICE_RESET); while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) & - (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT))) + (INTR__RST_COMP | INTR__TIME_OUT))) cpu_relax(); if (ioread32(denali->flash_reg + INTR_STATUS(i)) & - INTR_STATUS__TIME_OUT) + INTR__TIME_OUT) dev_dbg(denali->dev, "NAND Reset operation timed out on bank %d\n", i); } for (i = 0; i < denali->max_banks; i++) - iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, + iowrite32(INTR__RST_COMP | INTR__TIME_OUT, denali->flash_reg + INTR_STATUS(i)); return PASS; @@ -347,52 +339,25 @@ static void get_samsung_nand_para(struct denali_nand_info *denali, static void get_toshiba_nand_para(struct denali_nand_info *denali) { - uint32_t tmp; - /* * Workaround to fix a controller bug which reports a wrong * spare area size for some kind of Toshiba NAND device */ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && - (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { + (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * - ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - iowrite32(tmp, - denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); -#if SUPPORT_15BITECC - iowrite32(15, denali->flash_reg + ECC_CORRECTION); -#elif SUPPORT_8BITECC - iowrite32(8, denali->flash_reg + ECC_CORRECTION); -#endif - } } static void get_hynix_nand_para(struct denali_nand_info *denali, uint8_t device_id) { - uint32_t main_size, spare_size; - switch (device_id) { case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK); iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - main_size = 4096 * - ioread32(denali->flash_reg + DEVICES_CONNECTED); - spare_size = 224 * - ioread32(denali->flash_reg + DEVICES_CONNECTED); - iowrite32(main_size, - denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); - iowrite32(spare_size, - denali->flash_reg + 
LOGICAL_PAGE_SPARE_SIZE); iowrite32(0, denali->flash_reg + DEVICE_WIDTH); -#if SUPPORT_15BITECC - iowrite32(15, denali->flash_reg + ECC_CORRECTION); -#elif SUPPORT_8BITECC - iowrite32(8, denali->flash_reg + ECC_CORRECTION); -#endif break; default: dev_warn(denali->dev, @@ -454,17 +419,12 @@ static void find_valid_banks(struct denali_nand_info *denali) static void detect_max_banks(struct denali_nand_info *denali) { uint32_t features = ioread32(denali->flash_reg + FEATURES); - /* - * Read the revision register, so we can calculate the max_banks - * properly: the encoding changed from rev 5.0 to 5.1 - */ - u32 revision = MAKE_COMPARABLE_REVISION( - ioread32(denali->flash_reg + REVISION)); - if (revision < REVISION_5_1) - denali->max_banks = 2 << (features & FEATURES__N_BANKS); - else - denali->max_banks = 1 << (features & FEATURES__N_BANKS); + denali->max_banks = 1 << (features & FEATURES__N_BANKS); + + /* the encoding changed from rev 5.0 to 5.1 */ + if (denali->revision < 0x0501) + denali->max_banks <<= 1; } static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) @@ -653,7 +613,6 @@ static irqreturn_t denali_isr(int irq, void *dev_id) spin_unlock(&denali->irq_lock); return result; } -#define BANK(x) ((x) << 24) static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) { @@ -718,15 +677,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, int access_type, int op) { int status = PASS; - uint32_t page_count = 1; - uint32_t addr, cmd, irq_status, irq_mask; - - if (op == DENALI_READ) - irq_mask = INTR_STATUS__LOAD_COMP; - else if (op == DENALI_WRITE) - irq_mask = 0; - else - BUG(); + uint32_t addr, cmd; setup_ecc_for_xfer(denali, ecc_en, transfer_spare); @@ -749,35 +700,8 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, cmd = MODE_10 | addr; index_addr(denali, cmd, access_type); - /* - * page 33 of the NAND controller spec indicates we should not - * use the pipeline commands in Spare area only mode. - * So we don't. - */ - if (access_type == SPARE_ACCESS) { - cmd = MODE_01 | addr; - iowrite32(cmd, denali->flash_mem); - } else { - index_addr(denali, cmd, - PIPELINE_ACCESS | op | page_count); - - /* - * wait for command to be accepted - * can always use status0 bit as the - * mask is identical for each bank. 
- */ - irq_status = wait_for_irq(denali, irq_mask); - - if (irq_status == 0) { - dev_err(denali->dev, - "cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n", - cmd, denali->page, addr); - status = FAIL; - } else { - cmd = MODE_01 | addr; - iowrite32(cmd, denali->flash_mem); - } - } + cmd = MODE_01 | addr; + iowrite32(cmd, denali->flash_mem); } return status; } @@ -829,8 +753,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP | - INTR_STATUS__PROGRAM_FAIL; + uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL; int status = 0; denali->page = page; @@ -857,7 +780,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_mask = INTR_STATUS__LOAD_COMP; + uint32_t irq_mask = INTR__LOAD_COMP; uint32_t irq_status, addr, cmd; denali->page = page; @@ -890,98 +813,158 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) } } -/* - * this function examines buffers to see if they contain data that - * indicate that the buffer is part of an erased region of flash. - */ -static bool is_erased(uint8_t *buf, int len) +static int denali_check_erased_page(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, + unsigned long uncor_ecc_flags, + unsigned int max_bitflips) { - int i; + uint8_t *ecc_code = chip->buffers->ecccode; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int i, ret, stat; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + for (i = 0; i < ecc_steps; i++) { + if (!(uncor_ecc_flags & BIT(i))) + continue; - for (i = 0; i < len; i++) - if (buf[i] != 0xFF) - return false; - return true; + stat = nand_check_erased_ecc_chunk(buf, ecc_size, + ecc_code, ecc_bytes, + NULL, 0, + chip->ecc.strength); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + + buf += ecc_size; + ecc_code += ecc_bytes; + } + + return max_bitflips; +} + +static int denali_hw_ecc_fixup(struct mtd_info *mtd, + struct denali_nand_info *denali, + unsigned long *uncor_ecc_flags) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + int bank = denali->flash_bank; + uint32_t ecc_cor; + unsigned int max_bitflips; + + ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank)); + ecc_cor >>= ECC_COR_INFO__SHIFT(bank); + + if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { + /* + * This flag is set when uncorrectable error occurs at least in + * one ECC sector. We can not know "how many sectors", or + * "which sector(s)". We need erase-page check for all sectors. + */ + *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); + return 0; + } + + max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; + + /* + * The register holds the maximum of per-sector corrected bitflips. + * This is suitable for the return value of the ->read_page() callback. + * Unfortunately, we can not know the total number of corrected bits in + * the page. Increase the stats by max_bitflips. 
(compromised solution) + */ + mtd->ecc_stats.corrected += max_bitflips; + + return max_bitflips; } + #define ECC_SECTOR_SIZE 512 #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) -#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE)) +#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE) #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) -static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, - uint32_t irq_status, unsigned int *max_bitflips) +static int denali_sw_ecc_fixup(struct mtd_info *mtd, + struct denali_nand_info *denali, + unsigned long *uncor_ecc_flags, uint8_t *buf) { - bool check_erased_page = false; unsigned int bitflips = 0; + unsigned int max_bitflips = 0; + uint32_t err_addr, err_cor_info; + unsigned int err_byte, err_sector, err_device; + uint8_t err_cor_value; + unsigned int prev_sector = 0; - if (irq_status & INTR_STATUS__ECC_ERR) { - /* read the ECC errors. we'll ignore them for now */ - uint32_t err_address, err_correction_info, err_byte, - err_sector, err_device, err_correction_value; - denali_set_intr_modes(denali, false); - - do { - err_address = ioread32(denali->flash_reg + - ECC_ERROR_ADDRESS); - err_sector = ECC_SECTOR(err_address); - err_byte = ECC_BYTE(err_address); - - err_correction_info = ioread32(denali->flash_reg + - ERR_CORRECTION_INFO); - err_correction_value = - ECC_CORRECTION_VALUE(err_correction_info); - err_device = ECC_ERR_DEVICE(err_correction_info); - - if (ECC_ERROR_CORRECTABLE(err_correction_info)) { - /* - * If err_byte is larger than ECC_SECTOR_SIZE, - * means error happened in OOB, so we ignore - * it. It's no need for us to correct it - * err_device is represented the NAND error - * bits are happened in if there are more - * than one NAND connected. - */ - if (err_byte < ECC_SECTOR_SIZE) { - struct mtd_info *mtd = - nand_to_mtd(&denali->nand); - int offset; - - offset = (err_sector * - ECC_SECTOR_SIZE + - err_byte) * - denali->devnum + - err_device; - /* correct the ECC error */ - buf[offset] ^= err_correction_value; - mtd->ecc_stats.corrected++; - bitflips++; - } - } else { - /* - * if the error is not correctable, need to - * look at the page to see if it is an erased - * page. if so, then it's not a real ECC error - */ - check_erased_page = true; - } - } while (!ECC_LAST_ERR(err_correction_info)); - /* - * Once handle all ecc errors, controller will triger - * a ECC_TRANSACTION_DONE interrupt, so here just wait - * for a while for this interrupt - */ - while (!(read_interrupt_status(denali) & - INTR_STATUS__ECC_TRANSACTION_DONE)) - cpu_relax(); - clear_interrupts(denali); - denali_set_intr_modes(denali, true); - } - *max_bitflips = bitflips; - return check_erased_page; + /* read the ECC errors. 
we'll ignore them for now */ + denali_set_intr_modes(denali, false); + + do { + err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS); + err_sector = ECC_SECTOR(err_addr); + err_byte = ECC_BYTE(err_addr); + + err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO); + err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); + err_device = ECC_ERR_DEVICE(err_cor_info); + + /* reset the bitflip counter when crossing ECC sector */ + if (err_sector != prev_sector) + bitflips = 0; + + if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { + /* + * Check later if this is a real ECC error, or + * an erased sector. + */ + *uncor_ecc_flags |= BIT(err_sector); + } else if (err_byte < ECC_SECTOR_SIZE) { + /* + * If err_byte is larger than ECC_SECTOR_SIZE, means error + * happened in OOB, so we ignore it. It's no need for + * us to correct it err_device is represented the NAND + * error bits are happened in if there are more than + * one NAND connected. + */ + int offset; + unsigned int flips_in_byte; + + offset = (err_sector * ECC_SECTOR_SIZE + err_byte) * + denali->devnum + err_device; + + /* correct the ECC error */ + flips_in_byte = hweight8(buf[offset] ^ err_cor_value); + buf[offset] ^= err_cor_value; + mtd->ecc_stats.corrected += flips_in_byte; + bitflips += flips_in_byte; + + max_bitflips = max(max_bitflips, bitflips); + } + + prev_sector = err_sector; + } while (!ECC_LAST_ERR(err_cor_info)); + + /* + * Once handle all ecc errors, controller will trigger a + * ECC_TRANSACTION_DONE interrupt, so here just wait for + * a while for this interrupt + */ + while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE)) + cpu_relax(); + clear_interrupts(denali); + denali_set_intr_modes(denali, true); + + return max_bitflips; } /* programs the controller to either enable/disable DMA transfers */ @@ -991,8 +974,30 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en) ioread32(denali->flash_reg + DMA_ENABLE); } -/* setups the HW to perform the data DMA */ -static void denali_setup_dma(struct denali_nand_info *denali, int op) +static void denali_setup_dma64(struct denali_nand_info *denali, int op) +{ + uint32_t mode; + const int page_count = 1; + uint64_t addr = denali->buf.dma_buf; + + mode = MODE_10 | BANK(denali->flash_bank) | denali->page; + + /* DMA is a three step process */ + + /* + * 1. setup transfer type, interrupt when complete, + * burst len = 64 bytes, the number of pages + */ + index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count); + + /* 2. set memory low address */ + index_addr(denali, mode, addr); + + /* 3. set memory high address */ + index_addr(denali, mode, addr >> 32); +} + +static void denali_setup_dma32(struct denali_nand_info *denali, int op) { uint32_t mode; const int page_count = 1; @@ -1015,6 +1020,14 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op) index_addr(denali, mode | 0x14000, 0x2400); } +static void denali_setup_dma(struct denali_nand_info *denali, int op) +{ + if (denali->caps & DENALI_CAP_DMA_64BIT) + denali_setup_dma64(denali, op); + else + denali_setup_dma32(denali, op); +} + /* * writes a page. user specifies type, and this function handles the * configuration details. 
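/*
 * Editorial sketch, not part of the patch above: denali_setup_dma64() in
 * the hunk just before this point programs a 64-bit DMA buffer address as
 * two 32-bit words (steps 2 and 3). The standalone example below only
 * illustrates that low/high split in plain C; it deliberately uses no
 * kernel APIs, and the sample address is a made-up value.
 */
#include <stdint.h>
#include <stdio.h>

static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;           /* "memory low address" word  */
	*hi = (uint32_t)(addr >> 32);   /* "memory high address" word */
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x000000012345F000ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
	/* prints: lo=0x2345f000 hi=0x00000001 */
	return 0;
}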
@@ -1026,8 +1039,7 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *chip, dma_addr_t addr = denali->buf.dma_buf; size_t size = mtd->writesize + mtd->oobsize; uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP | - INTR_STATUS__PROGRAM_FAIL; + uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; /* * if it is a raw xfer, we want to disable ecc and send the spare area. @@ -1118,16 +1130,15 @@ static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { - unsigned int max_bitflips; struct denali_nand_info *denali = mtd_to_denali(mtd); - dma_addr_t addr = denali->buf.dma_buf; size_t size = mtd->writesize + mtd->oobsize; - uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE | - INTR_STATUS__ECC_ERR; - bool check_erased_page = false; + uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ? + INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR : + INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR; + unsigned long uncor_ecc_flags = 0; + int stat = 0; if (page != denali->page) { dev_err(denali->dev, @@ -1151,21 +1162,23 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, memcpy(buf, denali->buf.buf, mtd->writesize); - check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips); + if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) + stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); + else if (irq_status & INTR__ECC_ERR) + stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); denali_enable_dma(denali, false); - if (check_erased_page) { + if (stat < 0) + return stat; + + if (uncor_ecc_flags) { read_oob_data(mtd, chip->oob_poi, denali->page); - /* check ECC failures that may have occurred on erased pages */ - if (check_erased_page) { - if (!is_erased(buf, mtd->writesize)) - mtd->ecc_stats.failed++; - if (!is_erased(buf, mtd->oobsize)) - mtd->ecc_stats.failed++; - } + stat = denali_check_erased_page(mtd, chip, buf, + uncor_ecc_flags, stat); } - return max_bitflips; + + return stat; } static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, @@ -1174,7 +1187,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, struct denali_nand_info *denali = mtd_to_denali(mtd); dma_addr_t addr = denali->buf.dma_buf; size_t size = mtd->writesize + mtd->oobsize; - uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP; + uint32_t irq_mask = INTR__DMA_CMD_COMP; if (page != denali->page) { dev_err(denali->dev, @@ -1247,10 +1260,9 @@ static int denali_erase(struct mtd_info *mtd, int page) index_addr(denali, cmd, 0x1); /* wait for erase to complete or failure to occur */ - irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | - INTR_STATUS__ERASE_FAIL); + irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL); - return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS; + return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS; } static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, @@ -1303,6 +1315,14 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, static void denali_hw_init(struct denali_nand_info *denali) { /* + * The REVISION register may not be reliable. Platforms are allowed to + * override it. 
+ */ + if (!denali->revision) + denali->revision = + swab16(ioread32(denali->flash_reg + REVISION)); + + /* * tell driver how many bit controller will skip before * writing ECC code in OOB, this register may be already * set by firmware. So we read this value out. @@ -1413,9 +1433,61 @@ static void denali_drv_init(struct denali_nand_info *denali) denali->irq_status = 0; } +static int denali_multidev_fixup(struct denali_nand_info *denali) +{ + struct nand_chip *chip = &denali->nand; + struct mtd_info *mtd = nand_to_mtd(chip); + + /* + * Support for multi device: + * When the IP configuration is x16 capable and two x8 chips are + * connected in parallel, DEVICES_CONNECTED should be set to 2. + * In this case, the core framework knows nothing about this fact, + * so we should tell it the _logical_ pagesize and anything necessary. + */ + denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); + + /* + * On some SoCs, DEVICES_CONNECTED is not auto-detected. + * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. + */ + if (denali->devnum == 0) { + denali->devnum = 1; + iowrite32(1, denali->flash_reg + DEVICES_CONNECTED); + } + + if (denali->devnum == 1) + return 0; + + if (denali->devnum != 2) { + dev_err(denali->dev, "unsupported number of devices %d\n", + denali->devnum); + return -EINVAL; + } + + /* 2 chips in parallel */ + mtd->size <<= 1; + mtd->erasesize <<= 1; + mtd->writesize <<= 1; + mtd->oobsize <<= 1; + chip->chipsize <<= 1; + chip->page_shift += 1; + chip->phys_erase_shift += 1; + chip->bbt_erase_shift += 1; + chip->chip_shift += 1; + chip->pagemask <<= 1; + chip->ecc.size <<= 1; + chip->ecc.bytes <<= 1; + chip->ecc.strength <<= 1; + denali->bbtskipbytes <<= 1; + + return 0; +} + int denali_init(struct denali_nand_info *denali) { - struct mtd_info *mtd = nand_to_mtd(&denali->nand); + struct nand_chip *chip = &denali->nand; + struct mtd_info *mtd = nand_to_mtd(chip); int ret; if (denali->platform == INTEL_CE4100) { @@ -1449,13 +1521,16 @@ int denali_init(struct denali_nand_info *denali) /* now that our ISR is registered, we can enable interrupts */ denali_set_intr_modes(denali, true); - mtd->name = "denali-nand"; + nand_set_flash_node(chip, denali->dev->of_node); + /* Fallback to the default name if DT did not give "label" property */ + if (!mtd->name) + mtd->name = "denali-nand"; /* register the driver with the NAND core subsystem */ - denali->nand.select_chip = denali_select_chip; - denali->nand.cmdfunc = denali_cmdfunc; - denali->nand.read_byte = denali_read_byte; - denali->nand.waitfunc = denali_waitfunc; + chip->select_chip = denali_select_chip; + chip->cmdfunc = denali_cmdfunc; + chip->read_byte = denali_read_byte; + chip->waitfunc = denali_waitfunc; /* * scan for NAND devices attached to the controller @@ -1476,8 +1551,9 @@ int denali_init(struct denali_nand_info *denali) goto failed_req_irq; } - /* Is 32-bit DMA supported? */ - ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); + ret = dma_set_mask(denali->dev, + DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ? 
+ 64 : 32)); if (ret) { dev_err(denali->dev, "No usable DMA configuration\n"); goto failed_req_irq; @@ -1493,54 +1569,35 @@ int denali_init(struct denali_nand_info *denali) } /* - * support for multi nand - * MTD known nothing about multi nand, so we should tell it - * the real pagesize and anything necessery - */ - denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); - denali->nand.chipsize <<= denali->devnum - 1; - denali->nand.page_shift += denali->devnum - 1; - denali->nand.pagemask = (denali->nand.chipsize >> - denali->nand.page_shift) - 1; - denali->nand.bbt_erase_shift += denali->devnum - 1; - denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift; - denali->nand.chip_shift += denali->devnum - 1; - mtd->writesize <<= denali->devnum - 1; - mtd->oobsize <<= denali->devnum - 1; - mtd->erasesize <<= denali->devnum - 1; - mtd->size = denali->nand.numchips * denali->nand.chipsize; - denali->bbtskipbytes *= denali->devnum; - - /* * second stage of the NAND scan * this stage requires information regarding ECC and * bad block management. */ /* Bad block management */ - denali->nand.bbt_td = &bbt_main_descr; - denali->nand.bbt_md = &bbt_mirror_descr; + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; /* skip the scan for now until we have OOB read and write support */ - denali->nand.bbt_options |= NAND_BBT_USE_FLASH; - denali->nand.options |= NAND_SKIP_BBTSCAN; - denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; + chip->bbt_options |= NAND_BBT_USE_FLASH; + chip->options |= NAND_SKIP_BBTSCAN; + chip->ecc.mode = NAND_ECC_HW_SYNDROME; /* no subpage writes on denali */ - denali->nand.options |= NAND_NO_SUBPAGE_WRITE; + chip->options |= NAND_NO_SUBPAGE_WRITE; /* * Denali Controller only support 15bit and 8bit ECC in MRST, * so just let controller do 15bit ECC for MLC and 8bit ECC for * SLC if possible. 
* */ - if (!nand_is_slc(&denali->nand) && + if (!nand_is_slc(chip) && (mtd->oobsize > (denali->bbtskipbytes + ECC_15BITS * (mtd->writesize / ECC_SECTOR_SIZE)))) { /* if MLC OOB size is large enough, use 15bit ECC*/ - denali->nand.ecc.strength = 15; - denali->nand.ecc.bytes = ECC_15BITS; + chip->ecc.strength = 15; + chip->ecc.bytes = ECC_15BITS; iowrite32(15, denali->flash_reg + ECC_CORRECTION); } else if (mtd->oobsize < (denali->bbtskipbytes + ECC_8BITS * (mtd->writesize / @@ -1548,24 +1605,26 @@ int denali_init(struct denali_nand_info *denali) pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes"); goto failed_req_irq; } else { - denali->nand.ecc.strength = 8; - denali->nand.ecc.bytes = ECC_8BITS; + chip->ecc.strength = 8; + chip->ecc.bytes = ECC_8BITS; iowrite32(8, denali->flash_reg + ECC_CORRECTION); } mtd_set_ooblayout(mtd, &denali_ooblayout_ops); - denali->nand.ecc.bytes *= denali->devnum; - denali->nand.ecc.strength *= denali->devnum; /* override the default read operations */ - denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; - denali->nand.ecc.read_page = denali_read_page; - denali->nand.ecc.read_page_raw = denali_read_page_raw; - denali->nand.ecc.write_page = denali_write_page; - denali->nand.ecc.write_page_raw = denali_write_page_raw; - denali->nand.ecc.read_oob = denali_read_oob; - denali->nand.ecc.write_oob = denali_write_oob; - denali->nand.erase = denali_erase; + chip->ecc.size = ECC_SECTOR_SIZE; + chip->ecc.read_page = denali_read_page; + chip->ecc.read_page_raw = denali_read_page_raw; + chip->ecc.write_page = denali_write_page; + chip->ecc.write_page_raw = denali_write_page_raw; + chip->ecc.read_oob = denali_read_oob; + chip->ecc.write_oob = denali_write_oob; + chip->erase = denali_erase; + + ret = denali_multidev_fixup(denali); + if (ret) + goto failed_req_irq; ret = nand_scan_tail(mtd); if (ret) diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index ea22191e8515..ec004850652a 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -20,6 +20,7 @@ #ifndef __DENALI_H__ #define __DENALI_H__ +#include <linux/bitops.h> #include <linux/mtd/nand.h> #define DEVICE_RESET 0x0 @@ -178,8 +179,6 @@ #define REVISION 0x370 #define REVISION__VALUE 0xffff -#define MAKE_COMPARABLE_REVISION(x) swab16((x) & REVISION__VALUE) -#define REVISION_5_1 0x00000501 #define ONFI_DEVICE_FEATURES 0x380 #define ONFI_DEVICE_FEATURES__VALUE 0x003f @@ -218,65 +217,29 @@ #define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) #define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) - -#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001 -#define INTR_STATUS__ECC_ERR 0x0002 -#define INTR_STATUS__DMA_CMD_COMP 0x0004 -#define INTR_STATUS__TIME_OUT 0x0008 -#define INTR_STATUS__PROGRAM_FAIL 0x0010 -#define INTR_STATUS__ERASE_FAIL 0x0020 -#define INTR_STATUS__LOAD_COMP 0x0040 -#define INTR_STATUS__PROGRAM_COMP 0x0080 -#define INTR_STATUS__ERASE_COMP 0x0100 -#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_STATUS__LOCKED_BLK 0x0400 -#define INTR_STATUS__UNSUP_CMD 0x0800 -#define INTR_STATUS__INT_ACT 0x1000 -#define INTR_STATUS__RST_COMP 0x2000 -#define INTR_STATUS__PIPE_CMD_ERR 0x4000 -#define INTR_STATUS__PAGE_XFER_INC 0x8000 - -#define INTR_EN__ECC_TRANSACTION_DONE 0x0001 -#define INTR_EN__ECC_ERR 0x0002 -#define INTR_EN__DMA_CMD_COMP 0x0004 -#define INTR_EN__TIME_OUT 0x0008 -#define INTR_EN__PROGRAM_FAIL 0x0010 -#define INTR_EN__ERASE_FAIL 0x0020 -#define INTR_EN__LOAD_COMP 0x0040 -#define INTR_EN__PROGRAM_COMP 0x0080 -#define 
INTR_EN__ERASE_COMP 0x0100 -#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN__LOCKED_BLK 0x0400 -#define INTR_EN__UNSUP_CMD 0x0800 -#define INTR_EN__INT_ACT 0x1000 -#define INTR_EN__RST_COMP 0x2000 -#define INTR_EN__PIPE_CMD_ERR 0x4000 -#define INTR_EN__PAGE_XFER_INC 0x8000 +/* bit[1:0] is used differently depending on IP version */ +#define INTR__ECC_UNCOR_ERR 0x0001 /* new IP */ +#define INTR__ECC_TRANSACTION_DONE 0x0001 /* old IP */ +#define INTR__ECC_ERR 0x0002 /* old IP */ +#define INTR__DMA_CMD_COMP 0x0004 +#define INTR__TIME_OUT 0x0008 +#define INTR__PROGRAM_FAIL 0x0010 +#define INTR__ERASE_FAIL 0x0020 +#define INTR__LOAD_COMP 0x0040 +#define INTR__PROGRAM_COMP 0x0080 +#define INTR__ERASE_COMP 0x0100 +#define INTR__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR__LOCKED_BLK 0x0400 +#define INTR__UNSUP_CMD 0x0800 +#define INTR__INT_ACT 0x1000 +#define INTR__RST_COMP 0x2000 +#define INTR__PIPE_CMD_ERR 0x4000 +#define INTR__PAGE_XFER_INC 0x8000 #define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) #define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) #define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) -#define DATA_INTR 0x550 -#define DATA_INTR__WRITE_SPACE_AV 0x0001 -#define DATA_INTR__READ_DATA_AV 0x0002 - -#define DATA_INTR_EN 0x560 -#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 -#define DATA_INTR_EN__READ_DATA_AV 0x0002 - -#define GPREG_0 0x570 -#define GPREG_0__VALUE 0xffff - -#define GPREG_1 0x580 -#define GPREG_1__VALUE 0xffff - -#define GPREG_2 0x590 -#define GPREG_2__VALUE 0xffff - -#define GPREG_3 0x5a0 -#define GPREG_3__VALUE 0xffff - #define ECC_THRESHOLD 0x600 #define ECC_THRESHOLD__VALUE 0x03ff @@ -297,6 +260,11 @@ #define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 #define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 +#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) +#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) +#define ECC_COR_INFO__MAX_ERRORS 0x007f +#define ECC_COR_INFO__UNCOR_ERR 0x0080 + #define DMA_ENABLE 0x700 #define DMA_ENABLE__FLAG 0x0001 @@ -304,20 +272,13 @@ #define IGNORE_ECC_DONE__FLAG 0x0001 #define DMA_INTR 0x720 +#define DMA_INTR_EN 0x730 #define DMA_INTR__TARGET_ERROR 0x0001 #define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 #define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 #define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 #define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 -#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 - -#define DMA_INTR_EN 0x730 -#define DMA_INTR_EN__TARGET_ERROR 0x0001 -#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 -#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 -#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 -#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 -#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 +#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 #define TARGET_ERR_ADDR_LO 0x740 #define TARGET_ERR_ADDR_LO__VALUE 0xffff @@ -331,69 +292,12 @@ #define CHNL_ACTIVE__CHANNEL2 0x0004 #define CHNL_ACTIVE__CHANNEL3 0x0008 -#define ACTIVE_SRC_ID 0x800 -#define ACTIVE_SRC_ID__VALUE 0x00ff - -#define PTN_INTR 0x810 -#define PTN_INTR__CONFIG_ERROR 0x0001 -#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 -#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 -#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 -#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 -#define PTN_INTR__REG_ACCESS_ERROR 0x0020 - -#define PTN_INTR_EN 0x820 -#define PTN_INTR_EN__CONFIG_ERROR 0x0001 -#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 -#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 -#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 -#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 -#define 
PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 - -#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40)) -#define PERM_SRC_ID__SRCID 0x00ff -#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800 -#define PERM_SRC_ID__WRITE_ACTIVE 0x2000 -#define PERM_SRC_ID__READ_ACTIVE 0x4000 -#define PERM_SRC_ID__PARTITION_VALID 0x8000 - -#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40)) -#define MIN_BLK_ADDR__VALUE 0xffff - -#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40)) -#define MAX_BLK_ADDR__VALUE 0xffff - -#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40)) -#define MIN_MAX_BANK__MIN_VALUE 0x0003 -#define MIN_MAX_BANK__MAX_VALUE 0x000c - - -/* ffsdefs.h */ -#define CLEAR 0 /*use this to clear a field instead of "fail"*/ -#define SET 1 /*use this to set a field instead of "pass"*/ #define FAIL 1 /*failed flag*/ #define PASS 0 /*success flag*/ -#define ERR -1 /*error flag*/ - -/* lld.h */ -#define GOOD_BLOCK 0 -#define DEFECTIVE_BLOCK 1 -#define READ_ERROR 2 #define CLK_X 5 #define CLK_MULTI 4 -/* KBV - Updated to LNW scratch register address */ -#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR -#define SCRATCH_REG_SIZE 64 - -#define GLOB_HWCTL_DEFAULT_BLKS 2048 - -#define SUPPORT_15BITECC 1 -#define SUPPORT_8BITECC 1 - -#define CUSTOM_CONF_PARAMS 0 - #define ONFI_BLOOM_TIME 1 #define MODE5_WORKAROUND 0 @@ -403,31 +307,6 @@ #define MODE_10 0x08000000 #define MODE_11 0x0C000000 - -#define DATA_TRANSFER_MODE 0 -#define PROTECTION_PER_BLOCK 1 -#define LOAD_WAIT_COUNT 2 -#define PROGRAM_WAIT_COUNT 3 -#define ERASE_WAIT_COUNT 4 -#define INT_MONITOR_CYCLE_COUNT 5 -#define READ_BUSY_PIN_ENABLED 6 -#define MULTIPLANE_OPERATION_SUPPORT 7 -#define PRE_FETCH_MODE 8 -#define CE_DONT_CARE_SUPPORT 9 -#define COPYBACK_SUPPORT 10 -#define CACHE_WRITE_SUPPORT 11 -#define CACHE_READ_SUPPORT 12 -#define NUM_PAGES_IN_BLOCK 13 -#define ECC_ENABLE_SELECT 14 -#define WRITE_ENABLE_2_READ_ENABLE 15 -#define ADDRESS_2_DATA 16 -#define READ_ENABLE_2_WRITE_ENABLE 17 -#define TWO_ROW_ADDRESS_CYCLES 18 -#define MULTIPLANE_ADDRESS_RESTRICT 19 -#define ACC_CLOCKS 20 -#define READ_WRITE_ENABLE_LOW_COUNT 21 -#define READ_WRITE_ENABLE_HIGH_COUNT 22 - #define ECC_SECTOR_SIZE 512 struct nand_buf { @@ -449,23 +328,26 @@ struct denali_nand_info { struct nand_buf buf; struct device *dev; int total_used_banks; - uint32_t block; /* stored for future use */ - uint16_t page; - void __iomem *flash_reg; /* Mapped io reg base address */ - void __iomem *flash_mem; /* Mapped io reg base address */ + int page; + void __iomem *flash_reg; /* Register Interface */ + void __iomem *flash_mem; /* Host Data/Command Interface */ /* elements used by ISR */ struct completion complete; spinlock_t irq_lock; uint32_t irq_status; - int irq_debug_array[32]; int irq; - uint32_t devnum; /* represent how many nands connected */ - uint32_t bbtskipbytes; - uint32_t max_banks; + int devnum; /* represent how many nands connected */ + int bbtskipbytes; + int max_banks; + unsigned int revision; + unsigned int caps; }; +#define DENALI_CAP_HW_ECC_FIXUP BIT(0) +#define DENALI_CAP_DMA_64BIT BIT(1) + extern int denali_init(struct denali_nand_info *denali); extern void denali_remove(struct denali_nand_info *denali); diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 5607fcd3b8ed..df9ef36cc2ce 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -29,64 +29,66 @@ struct denali_dt { struct clk *clk; }; -static const struct of_device_id denali_nand_dt_ids[] = { - { .compatible = "denali,denali-nand-dt" }, - 
{ /* sentinel */ } - }; +struct denali_dt_data { + unsigned int revision; + unsigned int caps; +}; -MODULE_DEVICE_TABLE(of, denali_nand_dt_ids); +static const struct denali_dt_data denali_socfpga_data = { + .caps = DENALI_CAP_HW_ECC_FIXUP, +}; -static u64 denali_dma_mask; +static const struct of_device_id denali_nand_dt_ids[] = { + { + .compatible = "altr,socfpga-denali-nand", + .data = &denali_socfpga_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, denali_nand_dt_ids); -static int denali_dt_probe(struct platform_device *ofdev) +static int denali_dt_probe(struct platform_device *pdev) { struct resource *denali_reg, *nand_data; struct denali_dt *dt; + const struct denali_dt_data *data; struct denali_nand_info *denali; int ret; - const struct of_device_id *of_id; - of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev); - if (of_id) { - ofdev->id_entry = of_id->data; - } else { - pr_err("Failed to find the right device id.\n"); - return -ENOMEM; - } - - dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); + dt = devm_kzalloc(&pdev->dev, sizeof(*dt), GFP_KERNEL); if (!dt) return -ENOMEM; denali = &dt->denali; + data = of_device_get_match_data(&pdev->dev); + if (data) { + denali->revision = data->revision; + denali->caps = data->caps; + } + denali->platform = DT; - denali->dev = &ofdev->dev; - denali->irq = platform_get_irq(ofdev, 0); + denali->dev = &pdev->dev; + denali->irq = platform_get_irq(pdev, 0); if (denali->irq < 0) { - dev_err(&ofdev->dev, "no irq defined\n"); + dev_err(&pdev->dev, "no irq defined\n"); return denali->irq; } - denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); - denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg); + denali_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "denali_reg"); + denali->flash_reg = devm_ioremap_resource(&pdev->dev, denali_reg); if (IS_ERR(denali->flash_reg)) return PTR_ERR(denali->flash_reg); - nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); - denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data); + nand_data = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "nand_data"); + denali->flash_mem = devm_ioremap_resource(&pdev->dev, nand_data); if (IS_ERR(denali->flash_mem)) return PTR_ERR(denali->flash_mem); - if (!of_property_read_u32(ofdev->dev.of_node, - "dma-mask", (u32 *)&denali_dma_mask)) { - denali->dev->dma_mask = &denali_dma_mask; - } else { - denali->dev->dma_mask = NULL; - } - - dt->clk = devm_clk_get(&ofdev->dev, NULL); + dt->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dt->clk)) { - dev_err(&ofdev->dev, "no clk available\n"); + dev_err(&pdev->dev, "no clk available\n"); return PTR_ERR(dt->clk); } clk_prepare_enable(dt->clk); @@ -95,7 +97,7 @@ static int denali_dt_probe(struct platform_device *ofdev) if (ret) goto out_disable_clk; - platform_set_drvdata(ofdev, dt); + platform_set_drvdata(pdev, dt); return 0; out_disable_clk: @@ -104,9 +106,9 @@ out_disable_clk: return ret; } -static int denali_dt_remove(struct platform_device *ofdev) +static int denali_dt_remove(struct platform_device *pdev) { - struct denali_dt *dt = platform_get_drvdata(ofdev); + struct denali_dt *dt = platform_get_drvdata(pdev); denali_remove(&dt->denali); clk_disable_unprepare(dt->clk); diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index bda1e4667138..cea50d2f218c 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -38,15 +38,6 @@ #include <linux/amba/bus.h> #include <mtd/mtd-abi.h> 
-#define FSMC_NAND_BW8 1 -#define FSMC_NAND_BW16 2 - -#define FSMC_MAX_NOR_BANKS 4 -#define FSMC_MAX_NAND_BANKS 4 - -#define FSMC_FLASH_WIDTH8 1 -#define FSMC_FLASH_WIDTH16 2 - /* fsmc controller registers for NOR flash */ #define CTRL 0x0 /* ctrl register definitions */ @@ -133,33 +124,48 @@ enum access_mode { }; /** - * fsmc_nand_platform_data - platform specific NAND controller config - * @nand_timings: timing setup for the physical NAND interface - * @partitions: partition table for the platform, use a default fallback - * if this is NULL - * @nr_partitions: the number of partitions in the previous entry - * @options: different options for the driver - * @width: bus width - * @bank: default bank - * @select_bank: callback to select a certain bank, this is - * platform-specific. If the controller only supports one bank - * this may be set to NULL + * struct fsmc_nand_data - structure for FSMC NAND device state + * + * @pid: Part ID on the AMBA PrimeCell format + * @mtd: MTD info for a NAND flash. + * @nand: Chip related info for a NAND flash. + * @partitions: Partition info for a NAND Flash. + * @nr_partitions: Total number of partition of a NAND flash. + * + * @bank: Bank number for probed device. + * @clk: Clock structure for FSMC. + * + * @read_dma_chan: DMA channel for read access + * @write_dma_chan: DMA channel for write access to NAND + * @dma_access_complete: Completion structure + * + * @data_pa: NAND Physical port for Data. + * @data_va: NAND port for Data. + * @cmd_va: NAND port for Command. + * @addr_va: NAND port for Address. + * @regs_va: FSMC regs base address. */ -struct fsmc_nand_platform_data { - struct fsmc_nand_timings *nand_timings; - struct mtd_partition *partitions; - unsigned int nr_partitions; - unsigned int options; - unsigned int width; - unsigned int bank; +struct fsmc_nand_data { + u32 pid; + struct nand_chip nand; + unsigned int bank; + struct device *dev; enum access_mode mode; + struct clk *clk; - void (*select_bank)(uint32_t bank, uint32_t busw); + /* DMA related objects */ + struct dma_chan *read_dma_chan; + struct dma_chan *write_dma_chan; + struct completion dma_access_complete; - /* priv structures for dma accesses */ - void *read_dma_priv; - void *write_dma_priv; + struct fsmc_nand_timings *dev_timings; + + dma_addr_t data_pa; + void __iomem *data_va; + void __iomem *cmd_va; + void __iomem *addr_va; + void __iomem *regs_va; }; static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section, @@ -246,86 +252,11 @@ static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = { .free = fsmc_ecc4_ooblayout_free, }; -/** - * struct fsmc_nand_data - structure for FSMC NAND device state - * - * @pid: Part ID on the AMBA PrimeCell format - * @mtd: MTD info for a NAND flash. - * @nand: Chip related info for a NAND flash. - * @partitions: Partition info for a NAND Flash. - * @nr_partitions: Total number of partition of a NAND flash. - * - * @bank: Bank number for probed device. - * @clk: Clock structure for FSMC. - * - * @read_dma_chan: DMA channel for read access - * @write_dma_chan: DMA channel for write access to NAND - * @dma_access_complete: Completion structure - * - * @data_pa: NAND Physical port for Data. - * @data_va: NAND port for Data. - * @cmd_va: NAND port for Command. - * @addr_va: NAND port for Address. - * @regs_va: FSMC regs base address. 
- */ -struct fsmc_nand_data { - u32 pid; - struct nand_chip nand; - struct mtd_partition *partitions; - unsigned int nr_partitions; - - unsigned int bank; - struct device *dev; - enum access_mode mode; - struct clk *clk; - - /* DMA related objects */ - struct dma_chan *read_dma_chan; - struct dma_chan *write_dma_chan; - struct completion dma_access_complete; - - struct fsmc_nand_timings *dev_timings; - - dma_addr_t data_pa; - void __iomem *data_va; - void __iomem *cmd_va; - void __iomem *addr_va; - void __iomem *regs_va; - - void (*select_chip)(uint32_t bank, uint32_t busw); -}; - static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd) { return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand); } -/* Assert CS signal based on chipnr */ -static void fsmc_select_chip(struct mtd_info *mtd, int chipnr) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - struct fsmc_nand_data *host; - - host = mtd_to_fsmc(mtd); - - switch (chipnr) { - case -1: - chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); - break; - case 0: - case 1: - case 2: - case 3: - if (host->select_chip) - host->select_chip(chipnr, - chip->options & NAND_BUSWIDTH_16); - break; - - default: - dev_err(host->dev, "unsupported chip-select %d\n", chipnr); - } -} - /* * fsmc_cmd_ctrl - For facilitaing Hardware access * This routine allows hardware specific access to control-lines(ALE,CLE) @@ -838,44 +769,46 @@ static bool filter(struct dma_chan *chan, void *slave) } static int fsmc_nand_probe_config_dt(struct platform_device *pdev, - struct device_node *np) + struct fsmc_nand_data *host, + struct nand_chip *nand) { - struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device_node *np = pdev->dev.of_node; u32 val; int ret; - /* Set default NAND width to 8 bits */ - pdata->width = 8; + nand->options = 0; + if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { - pdata->width = 16; + nand->options |= NAND_BUSWIDTH_16; } else if (val != 1) { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } } + if (of_get_property(np, "nand-skip-bbtscan", NULL)) - pdata->options = NAND_SKIP_BBTSCAN; + nand->options |= NAND_SKIP_BBTSCAN; - pdata->nand_timings = devm_kzalloc(&pdev->dev, - sizeof(*pdata->nand_timings), GFP_KERNEL); - if (!pdata->nand_timings) + host->dev_timings = devm_kzalloc(&pdev->dev, + sizeof(*host->dev_timings), GFP_KERNEL); + if (!host->dev_timings) return -ENOMEM; - ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings, - sizeof(*pdata->nand_timings)); + ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings, + sizeof(*host->dev_timings)); if (ret) { dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n"); - pdata->nand_timings = NULL; + host->dev_timings = NULL; } /* Set default NAND bank to 0 */ - pdata->bank = 0; + host->bank = 0; if (!of_property_read_u32(np, "bank", &val)) { if (val > 3) { dev_err(&pdev->dev, "invalid bank %u\n", val); return -EINVAL; } - pdata->bank = val; + host->bank = val; } return 0; } @@ -886,8 +819,6 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, */ static int __init fsmc_nand_probe(struct platform_device *pdev) { - struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct device_node __maybe_unused *np = pdev->dev.of_node; struct fsmc_nand_data *host; struct mtd_info *mtd; struct nand_chip *nand; @@ -897,22 +828,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) u32 pid; int i; - pdata = 
devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - pdev->dev.platform_data = pdata; - ret = fsmc_nand_probe_config_dt(pdev, np); - if (ret) { - dev_err(&pdev->dev, "no platform data\n"); - return -ENODEV; - } - /* Allocate memory for the device structure (and zero it) */ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) return -ENOMEM; + nand = &host->nand; + + ret = fsmc_nand_probe_config_dt(pdev, host, nand); + if (ret) + return ret; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); host->data_va = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->data_va)) @@ -935,7 +861,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (IS_ERR(host->regs_va)) return PTR_ERR(host->regs_va); - host->clk = clk_get(&pdev->dev, NULL); + host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "failed to fetch block clock\n"); return PTR_ERR(host->clk); @@ -943,7 +869,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) ret = clk_prepare_enable(host->clk); if (ret) - goto err_clk_prepare_enable; + return ret; /* * This device ID is actually a common AMBA ID as used on the @@ -957,22 +883,15 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid), AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid)); - host->bank = pdata->bank; - host->select_chip = pdata->select_bank; - host->partitions = pdata->partitions; - host->nr_partitions = pdata->nr_partitions; host->dev = &pdev->dev; - host->dev_timings = pdata->nand_timings; - host->mode = pdata->mode; if (host->mode == USE_DMA_ACCESS) init_completion(&host->dma_access_complete); /* Link all private pointers */ mtd = nand_to_mtd(&host->nand); - nand = &host->nand; nand_set_controller_data(nand, host); - nand_set_flash_node(nand, np); + nand_set_flash_node(nand, pdev->dev.of_node); mtd->dev.parent = &pdev->dev; nand->IO_ADDR_R = host->data_va; @@ -987,26 +906,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) nand->ecc.mode = NAND_ECC_HW; nand->ecc.hwctl = fsmc_enable_hwecc; nand->ecc.size = 512; - nand->options = pdata->options; - nand->select_chip = fsmc_select_chip; nand->badblockbits = 7; - nand_set_flash_node(nand, np); - - if (pdata->width == FSMC_NAND_BW16) - nand->options |= NAND_BUSWIDTH_16; switch (host->mode) { case USE_DMA_ACCESS: dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); - host->read_dma_chan = dma_request_channel(mask, filter, - pdata->read_dma_priv); + host->read_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->read_dma_chan) { dev_err(&pdev->dev, "Unable to get read dma channel\n"); goto err_req_read_chnl; } - host->write_dma_chan = dma_request_channel(mask, filter, - pdata->write_dma_priv); + host->write_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->write_dma_chan) { dev_err(&pdev->dev, "Unable to get write dma channel\n"); goto err_req_write_chnl; @@ -1107,18 +1018,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (ret) goto err_probe; - /* - * The partition information can is accessed by (in the same precedence) - * - * command line through Bootloader, - * platform data, - * default partition information present in driver. 
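
Editor's note: fsmc_nand_probe_config_dt() above now parses bank-width, nand-skip-bbtscan, timings and bank straight into the host structure and the nand_chip, treating absent properties as defaults and only malformed values as errors. A sketch of that optional-DT-property pattern, using a hypothetical "example,bank" property:

/* Illustrative sketch only; the "example,bank" property is hypothetical. */
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_parse_dt(struct platform_device *pdev, unsigned int *bank)
{
	struct device_node *np = pdev->dev.of_node;
	u32 val;

	*bank = 0;	/* default when the property is absent */
	if (!of_property_read_u32(np, "example,bank", &val)) {
		if (val > 3) {
			dev_err(&pdev->dev, "invalid bank %u\n", val);
			return -EINVAL;
		}
		*bank = val;
	}

	return 0;
}
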
- */ - /* - * Check for partition info passed - */ mtd->name = "nand"; - ret = mtd_device_register(mtd, host->partitions, host->nr_partitions); + ret = mtd_device_register(mtd, NULL, 0); if (ret) goto err_probe; @@ -1135,8 +1036,6 @@ err_req_write_chnl: dma_release_channel(host->read_dma_chan); err_req_read_chnl: clk_disable_unprepare(host->clk); -err_clk_prepare_enable: - clk_put(host->clk); return ret; } @@ -1155,7 +1054,6 @@ static int fsmc_nand_remove(struct platform_device *pdev) dma_release_channel(host->read_dma_chan); } clk_disable_unprepare(host->clk); - clk_put(host->clk); } return 0; @@ -1185,20 +1083,18 @@ static int fsmc_nand_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume); -#ifdef CONFIG_OF static const struct of_device_id fsmc_nand_id_table[] = { { .compatible = "st,spear600-fsmc-nand" }, { .compatible = "stericsson,fsmc-nand" }, {} }; MODULE_DEVICE_TABLE(of, fsmc_nand_id_table); -#endif static struct platform_driver fsmc_nand_driver = { .remove = fsmc_nand_remove, .driver = { .name = "fsmc-nand", - .of_match_table = of_match_ptr(fsmc_nand_id_table), + .of_match_table = fsmc_nand_id_table, .pm = &fsmc_nand_pm_ops, }, }; diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index 0d24857469ab..85294f150f4f 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -78,7 +78,9 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) gpio_nand_dosync(gpiomtd); if (ctrl & NAND_CTRL_CHANGE) { - gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE)); + if (gpio_is_valid(gpiomtd->plat.gpio_nce)) + gpio_set_value(gpiomtd->plat.gpio_nce, + !(ctrl & NAND_NCE)); gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); gpio_nand_dosync(gpiomtd); @@ -201,7 +203,8 @@ static int gpio_nand_remove(struct platform_device *pdev) if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) gpio_set_value(gpiomtd->plat.gpio_nwp, 0); - gpio_set_value(gpiomtd->plat.gpio_nce, 1); + if (gpio_is_valid(gpiomtd->plat.gpio_nce)) + gpio_set_value(gpiomtd->plat.gpio_nce, 1); return 0; } @@ -239,10 +242,13 @@ static int gpio_nand_probe(struct platform_device *pdev) if (ret) return ret; - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE"); - if (ret) - return ret; - gpio_direction_output(gpiomtd->plat.gpio_nce, 1); + if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { + ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, + "NAND NCE"); + if (ret) + return ret; + gpio_direction_output(gpiomtd->plat.gpio_nce, 1); + } if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, diff --git a/drivers/mtd/nand/nand_amd.c b/drivers/mtd/nand/nand_amd.c new file mode 100644 index 000000000000..170403a3bfa8 --- /dev/null +++ b/drivers/mtd/nand/nand_amd.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/mtd/nand.h> + +static void amd_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + nand_decode_ext_id(chip); + + /* + * Check for Spansion/AMD ID + repeating 5th, 6th byte since + * some Spansion chips have erasesize that conflicts with size + * listed in nand_ids table. + * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) + */ + if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 && + chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 && + mtd->writesize == 512) { + mtd->erasesize = 128 * 1024; + mtd->erasesize <<= ((chip->id.data[3] & 0x03) << 1); + } +} + +static int amd_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops amd_nand_manuf_ops = { + .detect = amd_nand_decode_id, + .init = amd_nand_init, +}; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index b0524f8accb6..d474378ed810 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -139,6 +139,74 @@ const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = { }; EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops); +/* + * Support the old "large page" layout used for 1-bit Hamming ECC where ECC + * are placed at a fixed offset. + */ +static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + switch (mtd->oobsize) { + case 64: + oobregion->offset = 40; + break; + case 128: + oobregion->offset = 80; + break; + default: + return -EINVAL; + } + + oobregion->length = ecc->total; + if (oobregion->offset + oobregion->length > mtd->oobsize) + return -ERANGE; + + return 0; +} + +static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ecc_offset = 0; + + if (section < 0 || section > 1) + return -ERANGE; + + switch (mtd->oobsize) { + case 64: + ecc_offset = 40; + break; + case 128: + ecc_offset = 80; + break; + default: + return -EINVAL; + } + + if (section == 0) { + oobregion->offset = 2; + oobregion->length = ecc_offset - 2; + } else { + oobregion->offset = ecc_offset + ecc->total; + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { + .ecc = nand_ooblayout_ecc_lp_hamming, + .free = nand_ooblayout_free_lp_hamming, +}; + static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len) { @@ -354,40 +422,32 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) */ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) { - int page, res = 0, i = 0; + int page, page_end, res; struct nand_chip *chip = mtd_to_nand(mtd); - u16 bad; + u8 bad; if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) ofs += mtd->erasesize - mtd->writesize; page = (int)(ofs >> chip->page_shift) & chip->pagemask; + page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 
2 : 1); - do { - if (chip->options & NAND_BUSWIDTH_16) { - chip->cmdfunc(mtd, NAND_CMD_READOOB, - chip->badblockpos & 0xFE, page); - bad = cpu_to_le16(chip->read_word(mtd)); - if (chip->badblockpos & 0x1) - bad >>= 8; - else - bad &= 0xFF; - } else { - chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, - page); - bad = chip->read_byte(mtd); - } + for (; page < page_end; page++) { + res = chip->ecc.read_oob(mtd, chip, page); + if (res) + return res; + + bad = chip->oob_poi[chip->badblockpos]; if (likely(chip->badblockbits == 8)) res = bad != 0xFF; else res = hweight8(bad) < chip->badblockbits; - ofs += mtd->writesize; - page = (int)(ofs >> chip->page_shift) & chip->pagemask; - i++; - } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); + if (res) + return res; + } - return res; + return 0; } /** @@ -676,6 +736,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, case NAND_CMD_ERASE2: case NAND_CMD_SEQIN: case NAND_CMD_STATUS: + case NAND_CMD_READID: + case NAND_CMD_SET_FEATURES: return; case NAND_CMD_RESET: @@ -794,6 +856,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, case NAND_CMD_ERASE2: case NAND_CMD_SEQIN: case NAND_CMD_STATUS: + case NAND_CMD_READID: + case NAND_CMD_SET_FEATURES: return; case NAND_CMD_RNDIN: @@ -1958,7 +2022,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, if (!aligned) use_bufpoi = 1; else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf); + use_bufpoi = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else use_bufpoi = 0; @@ -1997,8 +2063,6 @@ read_retry: break; } - max_bitflips = max_t(unsigned int, max_bitflips, ret); - /* Transfer not aligned data */ if (use_bufpoi) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && @@ -2049,6 +2113,7 @@ read_retry: } buf += bytes; + max_bitflips = max_t(unsigned int, max_bitflips, ret); } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; @@ -2637,7 +2702,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, } /** - * nand_write_page - [REPLACEABLE] write one page + * nand_write_page - write one page * @mtd: MTD device structure * @chip: NAND chip descriptor * @offset: address offset within the page @@ -2815,7 +2880,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, if (part_pagewr) use_bufpoi = 1; else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf); + use_bufpoi = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else use_bufpoi = 0; @@ -2840,9 +2907,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, /* We still need to erase leftover OOB data */ memset(chip->oob_poi, 0xff, mtd->oobsize); } - ret = chip->write_page(mtd, chip, column, bytes, wbuf, - oob_required, page, cached, - (ops->mode == MTD_OPS_RAW)); + + ret = nand_write_page(mtd, chip, column, bytes, wbuf, + oob_required, page, cached, + (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -3385,8 +3453,10 @@ static void nand_shutdown(struct mtd_info *mtd) } /* Set default functions */ -static void nand_set_defaults(struct nand_chip *chip, int busw) +static void nand_set_defaults(struct nand_chip *chip) { + unsigned int busw = chip->options & NAND_BUSWIDTH_16; + /* check for proper chip_delay setup, set 20us if not */ if (!chip->chip_delay) chip->chip_delay = 20; @@ -3431,6 +3501,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) nand_hw_control_init(chip->controller); } + if 
(!chip->buf_align) + chip->buf_align = 1; } /* Sanitize ONFI strings so we can safely print them */ @@ -3464,9 +3536,10 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len) } /* Parse the Extended Parameter Page. */ -static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, - struct nand_chip *chip, struct nand_onfi_params *p) +static int nand_flash_detect_ext_param_page(struct nand_chip *chip, + struct nand_onfi_params *p) { + struct mtd_info *mtd = nand_to_mtd(chip); struct onfi_ext_param_page *ep; struct onfi_ext_section *s; struct onfi_ext_ecc_info *ecc; @@ -3534,36 +3607,12 @@ ext_out: return ret; } -static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; - - return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, - feature); -} - -/* - * Configure chip properties from Micron vendor-specific ONFI table - */ -static void nand_onfi_detect_micron(struct nand_chip *chip, - struct nand_onfi_params *p) -{ - struct nand_onfi_vendor_micron *micron = (void *)p->vendor; - - if (le16_to_cpu(p->vendor_revision) < 1) - return; - - chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = nand_setup_read_retry_micron; -} - /* * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. */ -static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, - int *busw) +static int nand_flash_detect_onfi(struct nand_chip *chip) { + struct mtd_info *mtd = nand_to_mtd(chip); struct nand_onfi_params *p = &chip->onfi_params; int i, j; int val; @@ -3633,9 +3682,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun); if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS) - *busw = NAND_BUSWIDTH_16; - else - *busw = 0; + chip->options |= NAND_BUSWIDTH_16; if (p->ecc_bits != 0xff) { chip->ecc_strength_ds = p->ecc_bits; @@ -3653,24 +3700,21 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, chip->cmdfunc = nand_command_lp; /* The Extended Parameter Page is supported since ONFI 2.1. */ - if (nand_flash_detect_ext_param_page(mtd, chip, p)) + if (nand_flash_detect_ext_param_page(chip, p)) pr_warn("Failed to detect ONFI extended param page\n"); } else { pr_warn("Could not retrieve ONFI ECC requirements\n"); } - if (p->jedec_id == NAND_MFR_MICRON) - nand_onfi_detect_micron(chip, p); - return 1; } /* * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise. */ -static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip, - int *busw) +static int nand_flash_detect_jedec(struct nand_chip *chip) { + struct mtd_info *mtd = nand_to_mtd(chip); struct nand_jedec_params *p = &chip->jedec_params; struct jedec_ecc_info *ecc; int val; @@ -3729,9 +3773,7 @@ static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip, chip->bits_per_cell = p->bits_per_cell; if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS) - *busw = NAND_BUSWIDTH_16; - else - *busw = 0; + chip->options |= NAND_BUSWIDTH_16; /* ECC info */ ecc = &p->ecc_info[0]; @@ -3820,165 +3862,46 @@ static int nand_get_bits_per_cell(u8 cellinfo) * chip. The rest of the parameters must be decoded according to generic or * manufacturer-specific "extended ID" decoding patterns. 
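
Editor's note: a few hunks above, nand_do_read_ops() and nand_do_write_ops() start bouncing through the chip's internal buffer not only when the caller's buffer fails virt_addr_valid() but also when it does not satisfy the controller's new chip->buf_align constraint (defaulted to 1 in nand_set_defaults()). A small helper expressing that decision, as a sketch only:

/* Illustrative sketch of the bounce-buffer decision; not part of the patch. */
#include <linux/kernel.h>
#include <linux/mm.h>

static bool example_needs_bounce(const void *buf, unsigned long buf_align)
{
	return !virt_addr_valid(buf) ||
	       !IS_ALIGNED((unsigned long)buf, buf_align);
}
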
*/ -static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, - u8 id_data[8], int *busw) +void nand_decode_ext_id(struct nand_chip *chip) { - int extid, id_len; + struct mtd_info *mtd = nand_to_mtd(chip); + int extid; + u8 *id_data = chip->id.data; /* The 3rd id byte holds MLC / multichip data */ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); /* The 4th id byte is the important one */ extid = id_data[3]; - id_len = nand_id_len(id_data, 8); - - /* - * Field definitions are in the following datasheets: - * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) - * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) - * - * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung - * ID to decide what to do. - */ - if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && - !nand_is_slc(chip) && id_data[5] != 0x00) { - /* Calc pagesize */ - mtd->writesize = 2048 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - switch (((extid >> 2) & 0x04) | (extid & 0x03)) { - case 1: - mtd->oobsize = 128; - break; - case 2: - mtd->oobsize = 218; - break; - case 3: - mtd->oobsize = 400; - break; - case 4: - mtd->oobsize = 436; - break; - case 5: - mtd->oobsize = 512; - break; - case 6: - mtd->oobsize = 640; - break; - case 7: - default: /* Other cases are "reserved" (unknown) */ - mtd->oobsize = 1024; - break; - } - extid >>= 2; - /* Calc blocksize */ - mtd->erasesize = (128 * 1024) << - (((extid >> 1) & 0x04) | (extid & 0x03)); - *busw = 0; - } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && - !nand_is_slc(chip)) { - unsigned int tmp; - - /* Calc pagesize */ - mtd->writesize = 2048 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - switch (((extid >> 2) & 0x04) | (extid & 0x03)) { - case 0: - mtd->oobsize = 128; - break; - case 1: - mtd->oobsize = 224; - break; - case 2: - mtd->oobsize = 448; - break; - case 3: - mtd->oobsize = 64; - break; - case 4: - mtd->oobsize = 32; - break; - case 5: - mtd->oobsize = 16; - break; - default: - mtd->oobsize = 640; - break; - } - extid >>= 2; - /* Calc blocksize */ - tmp = ((extid >> 1) & 0x04) | (extid & 0x03); - if (tmp < 0x03) - mtd->erasesize = (128 * 1024) << tmp; - else if (tmp == 0x03) - mtd->erasesize = 768 * 1024; - else - mtd->erasesize = (64 * 1024) << tmp; - *busw = 0; - } else { - /* Calc pagesize */ - mtd->writesize = 1024 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - mtd->oobsize = (8 << (extid & 0x01)) * - (mtd->writesize >> 9); - extid >>= 2; - /* Calc blocksize. Blocksize is multiples of 64KiB */ - mtd->erasesize = (64 * 1024) << (extid & 0x03); - extid >>= 2; - /* Get buswidth information */ - *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; - - /* - * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per - * 512B page. For Toshiba SLC, we decode the 5th/6th byte as - * follows: - * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, - * 110b -> 24nm - * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC - */ - if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA && - nand_is_slc(chip) && - (id_data[5] & 0x7) == 0x6 /* 24nm */ && - !(id_data[4] & 0x80) /* !BENAND */) { - mtd->oobsize = 32 * mtd->writesize >> 9; - } - - } + /* Calc pagesize */ + mtd->writesize = 1024 << (extid & 0x03); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); + extid >>= 2; + /* Calc blocksize. 
Blocksize is multiples of 64KiB */ + mtd->erasesize = (64 * 1024) << (extid & 0x03); + extid >>= 2; + /* Get buswidth information */ + if (extid & 0x1) + chip->options |= NAND_BUSWIDTH_16; } +EXPORT_SYMBOL_GPL(nand_decode_ext_id); /* * Old devices have chip data hardcoded in the device ID table. nand_decode_id * decodes a matching ID table entry and assigns the MTD size parameters for * the chip. */ -static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip, - struct nand_flash_dev *type, u8 id_data[8], - int *busw) +static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) { - int maf_id = id_data[0]; + struct mtd_info *mtd = nand_to_mtd(chip); mtd->erasesize = type->erasesize; mtd->writesize = type->pagesize; mtd->oobsize = mtd->writesize / 32; - *busw = type->options & NAND_BUSWIDTH_16; /* All legacy ID NAND are small-page, SLC */ chip->bits_per_cell = 1; - - /* - * Check for Spansion/AMD ID + repeating 5th, 6th byte since - * some Spansion chips have erasesize that conflicts with size - * listed in nand_ids table. - * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) - */ - if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00 - && id_data[6] == 0x00 && id_data[7] == 0x00 - && mtd->writesize == 512) { - mtd->erasesize = 128 * 1024; - mtd->erasesize <<= ((id_data[3] & 0x03) << 1); - } } /* @@ -3986,36 +3909,15 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip, * heuristic patterns using various detected parameters (e.g., manufacturer, * page size, cell-type information). */ -static void nand_decode_bbm_options(struct mtd_info *mtd, - struct nand_chip *chip, u8 id_data[8]) +static void nand_decode_bbm_options(struct nand_chip *chip) { - int maf_id = id_data[0]; + struct mtd_info *mtd = nand_to_mtd(chip); /* Set the bad block position */ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) chip->badblockpos = NAND_LARGE_BADBLOCK_POS; else chip->badblockpos = NAND_SMALL_BADBLOCK_POS; - - /* - * Bad block marker is stored in the last page of each block on Samsung - * and Hynix MLC devices; stored in first two pages of each block on - * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, - * AMD/Spansion, and Macronix. All others scan only the first page. 
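
Editor's note: nand_decode_ext_id() above is reduced to the generic interpretation of the 4th ID byte, two bits each for page size, OOB size, block size and bus width, with the Samsung/Hynix/Toshiba special cases moving to the per-manufacturer files added later in this diff. A standalone worked example with a made-up ID byte:

/* Standalone worked example (not kernel code): unpacking a hypothetical
 * 4th ID byte with the generic rules of nand_decode_ext_id() above.
 * 0xd5 is an arbitrary example value, not taken from any datasheet.
 */
#include <stdio.h>

int main(void)
{
	unsigned int extid = 0xd5;
	unsigned int writesize, oobsize, erasesize, buswidth16;

	writesize = 1024 << (extid & 0x03);		/* 0xd5 -> 2048 */
	extid >>= 2;
	oobsize = (8 << (extid & 0x01)) * (writesize >> 9); /* -> 64 */
	extid >>= 2;
	erasesize = (64 * 1024) << (extid & 0x03);	/* -> 128 KiB */
	extid >>= 2;
	buswidth16 = extid & 0x01;			/* -> 1 (x16 bus) */

	printf("writesize %u, oobsize %u, erasesize %u, x16 bus: %u\n",
	       writesize, oobsize, erasesize, buswidth16);
	return 0;
}
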
- */ - if (!nand_is_slc(chip) && - (maf_id == NAND_MFR_SAMSUNG || - maf_id == NAND_MFR_HYNIX)) - chip->bbt_options |= NAND_BBT_SCANLASTPAGE; - else if ((nand_is_slc(chip) && - (maf_id == NAND_MFR_SAMSUNG || - maf_id == NAND_MFR_HYNIX || - maf_id == NAND_MFR_TOSHIBA || - maf_id == NAND_MFR_AMD || - maf_id == NAND_MFR_MACRONIX)) || - (mtd->writesize == 2048 && - maf_id == NAND_MFR_MICRON)) - chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } static inline bool is_full_id_nand(struct nand_flash_dev *type) @@ -4023,9 +3925,12 @@ static inline bool is_full_id_nand(struct nand_flash_dev *type) return type->id_len; } -static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, - struct nand_flash_dev *type, u8 *id_data, int *busw) +static bool find_full_id_nand(struct nand_chip *chip, + struct nand_flash_dev *type) { + struct mtd_info *mtd = nand_to_mtd(chip); + u8 *id_data = chip->id.data; + if (!strncmp(type->id, id_data, type->id_len)) { mtd->writesize = type->pagesize; mtd->erasesize = type->erasesize; @@ -4039,8 +3944,6 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, chip->onfi_timing_mode_default = type->onfi_timing_mode_default; - *busw = type->options & NAND_BUSWIDTH_16; - if (!mtd->name) mtd->name = type->name; @@ -4050,15 +3953,63 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, } /* + * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC + * compliant and does not have a full-id or legacy-id entry in the nand_ids + * table. + */ +static void nand_manufacturer_detect(struct nand_chip *chip) +{ + /* + * Try manufacturer detection if available and use + * nand_decode_ext_id() otherwise. + */ + if (chip->manufacturer.desc && chip->manufacturer.desc->ops && + chip->manufacturer.desc->ops->detect) + chip->manufacturer.desc->ops->detect(chip); + else + nand_decode_ext_id(chip); +} + +/* + * Manufacturer initialization. This function is called for all NANDs including + * ONFI and JEDEC compliant ones. + * Manufacturer drivers should put all their specific initialization code in + * their ->init() hook. + */ +static int nand_manufacturer_init(struct nand_chip *chip) +{ + if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops || + !chip->manufacturer.desc->ops->init) + return 0; + + return chip->manufacturer.desc->ops->init(chip); +} + +/* + * Manufacturer cleanup. This function is called for all NANDs including + * ONFI and JEDEC compliant ones. + * Manufacturer drivers should put all their specific cleanup code in their + * ->cleanup() hook. + */ +static void nand_manufacturer_cleanup(struct nand_chip *chip) +{ + /* Release manufacturer private data */ + if (chip->manufacturer.desc && chip->manufacturer.desc->ops && + chip->manufacturer.desc->ops->cleanup) + chip->manufacturer.desc->ops->cleanup(chip); +} + +/* * Get the flash and manufacturer id and lookup if the type is supported. */ -static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, - int *maf_id, int *dev_id, - struct nand_flash_dev *type) +static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { + const struct nand_manufacturer *manufacturer; + struct mtd_info *mtd = nand_to_mtd(chip); int busw; - int i, maf_idx; - u8 id_data[8]; + int i, ret; + u8 *id_data = chip->id.data; + u8 maf_id, dev_id; /* * Reset the chip, required by some chips (e.g. 
Micron MT29FxGxxxxx) @@ -4073,8 +4024,8 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); /* Read manufacturer and device IDs */ - *maf_id = chip->read_byte(mtd); - *dev_id = chip->read_byte(mtd); + maf_id = chip->read_byte(mtd); + dev_id = chip->read_byte(mtd); /* * Try again to make sure, as some systems the bus-hold or other @@ -4089,20 +4040,41 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, for (i = 0; i < 8; i++) id_data[i] = chip->read_byte(mtd); - if (id_data[0] != *maf_id || id_data[1] != *dev_id) { + if (id_data[0] != maf_id || id_data[1] != dev_id) { pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", - *maf_id, *dev_id, id_data[0], id_data[1]); + maf_id, dev_id, id_data[0], id_data[1]); return -ENODEV; } + chip->id.len = nand_id_len(id_data, 8); + + /* Try to identify manufacturer */ + manufacturer = nand_get_manufacturer(maf_id); + chip->manufacturer.desc = manufacturer; + if (!type) type = nand_flash_ids; + /* + * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic + * override it. + * This is required to make sure initial NAND bus width set by the + * NAND controller driver is coherent with the real NAND bus width + * (extracted by auto-detection code). + */ + busw = chip->options & NAND_BUSWIDTH_16; + + /* + * The flag is only set (never cleared), reset it to its default value + * before starting auto-detection. + */ + chip->options &= ~NAND_BUSWIDTH_16; + for (; type->name != NULL; type++) { if (is_full_id_nand(type)) { - if (find_full_id_nand(mtd, chip, type, id_data, &busw)) + if (find_full_id_nand(chip, type)) goto ident_done; - } else if (*dev_id == type->dev_id) { + } else if (dev_id == type->dev_id) { break; } } @@ -4110,11 +4082,11 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, chip->onfi_version = 0; if (!type->name || !type->pagesize) { /* Check if the chip is ONFI compliant */ - if (nand_flash_detect_onfi(mtd, chip, &busw)) + if (nand_flash_detect_onfi(chip)) goto ident_done; /* Check if the chip is JEDEC compliant */ - if (nand_flash_detect_jedec(mtd, chip, &busw)) + if (nand_flash_detect_jedec(chip)) goto ident_done; } @@ -4126,48 +4098,34 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, chip->chipsize = (uint64_t)type->chipsize << 20; - if (!type->pagesize) { - /* Decode parameters from extended ID */ - nand_decode_ext_id(mtd, chip, id_data, &busw); - } else { - nand_decode_id(mtd, chip, type, id_data, &busw); - } + if (!type->pagesize) + nand_manufacturer_detect(chip); + else + nand_decode_id(chip, type); + /* Get chip options */ chip->options |= type->options; - /* - * Check if chip is not a Samsung device. Do not clear the - * options for chips which do not have an extended id. - */ - if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) - chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; ident_done: - /* Try to identify manufacturer */ - for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { - if (nand_manuf_ids[maf_idx].id == *maf_id) - break; - } - if (chip->options & NAND_BUSWIDTH_AUTO) { - WARN_ON(chip->options & NAND_BUSWIDTH_16); - chip->options |= busw; - nand_set_defaults(chip, busw); + WARN_ON(busw & NAND_BUSWIDTH_16); + nand_set_defaults(chip); } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { /* * Check, if buswidth is correct. Hardware drivers should set * chip correct! 
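
Editor's note: the nand_detect() rework above, together with the new nand_amd.c, nand_hynix.c, nand_macronix.c, nand_micron.c, nand_samsung.c and nand_toshiba.c files in this diff, moves vendor quirks behind struct nand_manufacturer_ops with optional detect/init/cleanup hooks. A sketch of what a minimal vendor driver looks like in that framework; "acme" is a hypothetical manufacturer, and real ones are wired up through the nand_manufacturers[] table in nand_ids.c further down:

/* Illustrative sketch only; "acme" is a hypothetical vendor. */
#include <linux/mtd/nand.h>

static void acme_nand_decode_id(struct nand_chip *chip)
{
	/* Fall back to the generic 4th-ID-byte decoding ... */
	nand_decode_ext_id(chip);
	/* ... then apply vendor-specific geometry fixups here if needed. */
}

static int acme_nand_init(struct nand_chip *chip)
{
	if (nand_is_slc(chip))
		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

	return 0;
}

const struct nand_manufacturer_ops acme_nand_manuf_ops = {
	.detect = acme_nand_decode_id,
	.init = acme_nand_init,
};
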
*/ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", - *maf_id, *dev_id); - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name); - pr_warn("bus width %d instead %d bit\n", - (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, - busw ? 16 : 8); + maf_id, dev_id); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + mtd->name); + pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, + (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); return -EINVAL; } - nand_decode_bbm_options(mtd, chip, id_data); + nand_decode_bbm_options(chip); /* Calculate the address shift from the page size */ chip->page_shift = ffs(mtd->writesize) - 1; @@ -4190,18 +4148,22 @@ ident_done: if (mtd->writesize > 512 && chip->cmdfunc == nand_command) chip->cmdfunc = nand_command_lp; + ret = nand_manufacturer_init(chip); + if (ret) + return ret; + pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", - *maf_id, *dev_id); + maf_id, dev_id); if (chip->onfi_version) - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - chip->onfi_params.model); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + chip->onfi_params.model); else if (chip->jedec_version) - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - chip->jedec_params.model); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + chip->jedec_params.model); else - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - type->name); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + type->name); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", @@ -4333,12 +4295,6 @@ static int nand_dt_init(struct nand_chip *chip) ecc_strength = of_get_nand_ecc_strength(dn); ecc_step = of_get_nand_ecc_step_size(dn); - if ((ecc_step >= 0 && !(ecc_strength >= 0)) || - (!(ecc_step >= 0) && ecc_strength >= 0)) { - pr_err("must set both strength and step size in DT\n"); - return -EINVAL; - } - if (ecc_mode >= 0) chip->ecc.mode = ecc_mode; @@ -4391,10 +4347,10 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, return -EINVAL; } /* Set the default functions */ - nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16); + nand_set_defaults(chip); /* Read the flash type */ - ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table); + ret = nand_detect(chip, table); if (ret) { if (!(chip->options & NAND_SCAN_SILENT_NODEV)) pr_warn("No NAND device found\n"); @@ -4419,6 +4375,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, if (ret) return ret; + nand_maf_id = chip->id.data[0]; + nand_dev_id = chip->id.data[1]; + chip->select_chip(mtd, -1); /* Check for a chip array */ @@ -4610,7 +4569,7 @@ int nand_scan_tail(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; - struct nand_buffers *nbuf; + struct nand_buffers *nbuf = NULL; int ret; /* New bad blocks should be marked in OOB, flash-based BBT, or both */ @@ -4624,13 +4583,28 @@ int nand_scan_tail(struct mtd_info *mtd) } if (!(chip->options & NAND_OWN_BUFFERS)) { - nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize - + mtd->oobsize * 3, GFP_KERNEL); + nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); if (!nbuf) return -ENOMEM; - nbuf->ecccalc = (uint8_t *)(nbuf + 1); - nbuf->ecccode = nbuf->ecccalc + mtd->oobsize; - nbuf->databuf = nbuf->ecccode + mtd->oobsize; + + nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); + if (!nbuf->ecccalc) { + ret = -ENOMEM; + goto err_free; + } + + nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL); + if 
(!nbuf->ecccode) { + ret = -ENOMEM; + goto err_free; + } + + nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize, + GFP_KERNEL); + if (!nbuf->databuf) { + ret = -ENOMEM; + goto err_free; + } chip->buffers = nbuf; } else { @@ -4653,7 +4627,7 @@ int nand_scan_tail(struct mtd_info *mtd) break; case 64: case 128: - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); break; default: WARN(1, "No oob scheme defined for oobsize %d\n", @@ -4663,9 +4637,6 @@ int nand_scan_tail(struct mtd_info *mtd) } } - if (!chip->write_page) - chip->write_page = nand_write_page; - /* * Check ECC mode, default to software if 3byte/512byte hardware ECC is * selected and we have 256 byte pagesize fallback to software ECC @@ -4873,8 +4844,12 @@ int nand_scan_tail(struct mtd_info *mtd) /* Build bad block table */ return chip->scan_bbt(mtd); err_free: - if (!(chip->options & NAND_OWN_BUFFERS)) - kfree(chip->buffers); + if (nbuf) { + kfree(nbuf->databuf); + kfree(nbuf->ecccode); + kfree(nbuf->ecccalc); + kfree(nbuf); + } return ret; } EXPORT_SYMBOL(nand_scan_tail); @@ -4925,13 +4900,20 @@ void nand_cleanup(struct nand_chip *chip) /* Free bad block table memory */ kfree(chip->bbt); - if (!(chip->options & NAND_OWN_BUFFERS)) + if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) { + kfree(chip->buffers->databuf); + kfree(chip->buffers->ecccode); + kfree(chip->buffers->ecccalc); kfree(chip->buffers); + } /* Free bad block descriptor memory */ if (chip->badblock_pattern && chip->badblock_pattern->options & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + + /* Free manufacturer priv data. */ + nand_manufacturer_cleanup(chip); } EXPORT_SYMBOL_GPL(nand_cleanup); diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c new file mode 100644 index 000000000000..b12dc7325378 --- /dev/null +++ b/drivers/mtd/nand/nand_hynix.c @@ -0,0 +1,631 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/mtd/nand.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define NAND_HYNIX_CMD_SET_PARAMS 0x36 +#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16 + +#define NAND_HYNIX_1XNM_RR_REPEAT 8 + +/** + * struct hynix_read_retry - read-retry data + * @nregs: number of register to set when applying a new read-retry mode + * @regs: register offsets (NAND chip dependent) + * @values: array of values to set in registers. 
The array size is equal to + * (nregs * nmodes) + */ +struct hynix_read_retry { + int nregs; + const u8 *regs; + u8 values[0]; +}; + +/** + * struct hynix_nand - private Hynix NAND struct + * @nand_technology: manufacturing process expressed in picometer + * @read_retry: read-retry information + */ +struct hynix_nand { + const struct hynix_read_retry *read_retry; +}; + +/** + * struct hynix_read_retry_otp - structure describing how the read-retry OTP + * area + * @nregs: number of hynix private registers to set before reading the reading + * the OTP area + * @regs: registers that should be configured + * @values: values that should be set in regs + * @page: the address to pass to the READ_PAGE command. Depends on the NAND + * chip + * @size: size of the read-retry OTP section + */ +struct hynix_read_retry_otp { + int nregs; + const u8 *regs; + const u8 *values; + int page; + int size; +}; + +static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + u8 jedecid[6] = { }; + int i = 0; + + chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1); + for (i = 0; i < 5; i++) + jedecid[i] = chip->read_byte(mtd); + + return !strcmp("JEDEC", jedecid); +} + +static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct hynix_nand *hynix = nand_get_manufacturer_data(chip); + const u8 *values; + int status; + int i; + + values = hynix->read_retry->values + + (retry_mode * hynix->read_retry->nregs); + + /* Enter 'Set Hynix Parameters' mode */ + chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1); + + /* + * Configure the NAND in the requested read-retry mode. + * This is done by setting pre-defined values in internal NAND + * registers. + * + * The set of registers is NAND specific, and the values are either + * predefined or extracted from an OTP area on the NAND (values are + * probably tweaked at production in this case). + */ + for (i = 0; i < hynix->read_retry->nregs; i++) { + int column = hynix->read_retry->regs[i]; + + column |= column << 8; + chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1); + chip->write_byte(mtd, values[i]); + } + + /* Apply the new settings. */ + chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1); + + status = chip->waitfunc(mtd, chip); + if (status & NAND_STATUS_FAIL) + return -EIO; + + return 0; +} + +/** + * hynix_get_majority - get the value that is occurring the most in a given + * set of values + * @in: the array of values to test + * @repeat: the size of the in array + * @out: pointer used to store the output value + * + * This function implements the 'majority check' logic that is supposed to + * overcome the unreliability of MLC NANDs when reading the OTP area storing + * the read-retry parameters. + * + * It's based on a pretty simple assumption: if we repeat the same value + * several times and then take the one that is occurring the most, we should + * find the correct value. + * Let's hope this dummy algorithm prevents us from losing the read-retry + * parameters. + */ +static int hynix_get_majority(const u8 *in, int repeat, u8 *out) +{ + int i, j, half = repeat / 2; + + /* + * We only test the first half of the in array because we must ensure + * that the value is at least occurring repeat / 2 times. + * + * This loop is suboptimal since we may count the occurrences of the + * same value several time, but we are doing that on small sets, which + * makes it acceptable. 
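
Editor's note: the comment above explains the majority-vote heuristic hynix_get_majority() uses to read the read-retry OTP area reliably on MLC parts. The standalone demo below runs the same logic on a made-up set of eight repeated reads: a candidate taken from the first half of the buffer is accepted once more than repeat/2 of the later reads match it.

/* Standalone demo of the majority check; sample bytes are made up. */
#include <stdio.h>

static int get_majority(const unsigned char *in, int repeat,
			unsigned char *out)
{
	int i, j, half = repeat / 2;

	for (i = 0; i < half; i++) {
		int cnt = 0;

		/* Count later reads matching the candidate at index i. */
		for (j = i + 1; j < repeat; j++)
			if (in[j] == in[i])
				cnt++;

		if (cnt > half) {
			*out = in[i];
			return 0;
		}
	}

	return -1;
}

int main(void)
{
	/* Eight repeated reads with two corrupted copies (0x13, 0x90). */
	const unsigned char reads[8] = {
		0x52, 0x13, 0x52, 0x52, 0x90, 0x52, 0x52, 0x52
	};
	unsigned char val;

	if (!get_majority(reads, 8, &val))
		printf("majority value: 0x%02x\n", val);	/* 0x52 */
	return 0;
}
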
+ */ + for (i = 0; i < half; i++) { + int cnt = 0; + u8 val = in[i]; + + /* Count all values that are matching the one at index i. */ + for (j = i + 1; j < repeat; j++) { + if (in[j] == val) + cnt++; + } + + /* We found a value occurring more than repeat / 2. */ + if (cnt > half) { + *out = val; + return 0; + } + } + + return -EIO; +} + +static int hynix_read_rr_otp(struct nand_chip *chip, + const struct hynix_read_retry_otp *info, + void *buf) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int i; + + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + + chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1); + + for (i = 0; i < info->nregs; i++) { + int column = info->regs[i]; + + column |= column << 8; + chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1); + chip->write_byte(mtd, info->values[i]); + } + + chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1); + + /* Sequence to enter OTP mode? */ + chip->cmdfunc(mtd, 0x17, -1, -1); + chip->cmdfunc(mtd, 0x04, -1, -1); + chip->cmdfunc(mtd, 0x19, -1, -1); + + /* Now read the page */ + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page); + chip->read_buf(mtd, buf, info->size); + + /* Put everything back to normal */ + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1); + chip->write_byte(mtd, 0x0); + chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1); + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1); + + return 0; +} + +#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0 +#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8 +#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \ + (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize))) + +static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs, + int mode, int reg, bool inv, u8 *val) +{ + u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT]; + int val_offs = (mode * nregs) + reg; + int set_size = nmodes * nregs; + int i, ret; + + for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) { + int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv); + + tmp[i] = buf[val_offs + set_offs]; + } + + ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val); + if (ret) + return ret; + + if (inv) + *val = ~*val; + + return 0; +} + +static u8 hynix_1xnm_mlc_read_retry_regs[] = { + 0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf +}; + +static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip, + const struct hynix_read_retry_otp *info) +{ + struct hynix_nand *hynix = nand_get_manufacturer_data(chip); + struct hynix_read_retry *rr = NULL; + int ret, i, j; + u8 nregs, nmodes; + u8 *buf; + + buf = kmalloc(info->size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = hynix_read_rr_otp(chip, info, buf); + if (ret) + goto out; + + ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT, + &nmodes); + if (ret) + goto out; + + ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT, + NAND_HYNIX_1XNM_RR_REPEAT, + &nregs); + if (ret) + goto out; + + rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL); + if (!rr) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < nmodes; i++) { + for (j = 0; j < nregs; j++) { + u8 *val = rr->values + (i * nregs); + + ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j, + false, val); + if (!ret) + continue; + + ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j, + true, val); + if (ret) + goto out; + } + } + + rr->nregs = nregs; + rr->regs = hynix_1xnm_mlc_read_retry_regs; + hynix->read_retry = rr; + chip->setup_read_retry = hynix_nand_setup_read_retry; + chip->read_retries = nmodes; + +out: + kfree(buf); + + if (ret) + kfree(rr); + + return ret; 
+} + +static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 }; +static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 }; + +static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = { + { + .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs), + .regs = hynix_mlc_1xnm_rr_otp_regs, + .values = hynix_mlc_1xnm_rr_otp_values, + .page = 0x21f, + .size = 784 + }, + { + .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs), + .regs = hynix_mlc_1xnm_rr_otp_regs, + .values = hynix_mlc_1xnm_rr_otp_values, + .page = 0x200, + .size = 528, + }, +}; + +static int hynix_nand_rr_init(struct nand_chip *chip) +{ + int i, ret = 0; + bool valid_jedecid; + + valid_jedecid = hynix_nand_has_valid_jedecid(chip); + + /* + * We only support read-retry for 1xnm NANDs, and those NANDs all + * expose a valid JEDEC ID. + */ + if (valid_jedecid) { + u8 nand_tech = chip->id.data[5] >> 4; + + /* 1xnm technology */ + if (nand_tech == 4) { + for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps); + i++) { + /* + * FIXME: Hynix recommend to copy the + * read-retry OTP area into a normal page. + */ + ret = hynix_mlc_1xnm_rr_init(chip, + hynix_mlc_1xnm_rr_otps); + if (!ret) + break; + } + } + } + + if (ret) + pr_warn("failed to initialize read-retry infrastructure"); + + return 0; +} + +static void hynix_nand_extract_oobsize(struct nand_chip *chip, + bool valid_jedecid) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + u8 oobsize; + + oobsize = ((chip->id.data[3] >> 2) & 0x3) | + ((chip->id.data[3] >> 4) & 0x4); + + if (valid_jedecid) { + switch (oobsize) { + case 0: + mtd->oobsize = 2048; + break; + case 1: + mtd->oobsize = 1664; + break; + case 2: + mtd->oobsize = 1024; + break; + case 3: + mtd->oobsize = 640; + break; + default: + /* + * We should never reach this case, but if that + * happens, this probably means Hynix decided to use + * a different extended ID format, and we should find + * a way to support it. + */ + WARN(1, "Invalid OOB size"); + break; + } + } else { + switch (oobsize) { + case 0: + mtd->oobsize = 128; + break; + case 1: + mtd->oobsize = 224; + break; + case 2: + mtd->oobsize = 448; + break; + case 3: + mtd->oobsize = 64; + break; + case 4: + mtd->oobsize = 32; + break; + case 5: + mtd->oobsize = 16; + break; + case 6: + mtd->oobsize = 640; + break; + default: + /* + * We should never reach this case, but if that + * happens, this probably means Hynix decided to use + * a different extended ID format, and we should find + * a way to support it. + */ + WARN(1, "Invalid OOB size"); + break; + } + } +} + +static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip, + bool valid_jedecid) +{ + u8 ecc_level = (chip->id.data[4] >> 4) & 0x7; + + if (valid_jedecid) { + /* Reference: H27UCG8T2E datasheet */ + chip->ecc_step_ds = 1024; + + switch (ecc_level) { + case 0: + chip->ecc_step_ds = 0; + chip->ecc_strength_ds = 0; + break; + case 1: + chip->ecc_strength_ds = 4; + break; + case 2: + chip->ecc_strength_ds = 24; + break; + case 3: + chip->ecc_strength_ds = 32; + break; + case 4: + chip->ecc_strength_ds = 40; + break; + case 5: + chip->ecc_strength_ds = 50; + break; + case 6: + chip->ecc_strength_ds = 60; + break; + default: + /* + * We should never reach this case, but if that + * happens, this probably means Hynix decided to use + * a different extended ID format, and we should find + * a way to support it. + */ + WARN(1, "Invalid ECC requirements"); + } + } else { + /* + * The ECC requirements field meaning depends on the + * NAND technology. 
+ */ + u8 nand_tech = chip->id.data[5] & 0x3; + + if (nand_tech < 3) { + /* > 26nm, reference: H27UBG8T2A datasheet */ + if (ecc_level < 5) { + chip->ecc_step_ds = 512; + chip->ecc_strength_ds = 1 << ecc_level; + } else if (ecc_level < 7) { + if (ecc_level == 5) + chip->ecc_step_ds = 2048; + else + chip->ecc_step_ds = 1024; + chip->ecc_strength_ds = 24; + } else { + /* + * We should never reach this case, but if that + * happens, this probably means Hynix decided + * to use a different extended ID format, and + * we should find a way to support it. + */ + WARN(1, "Invalid ECC requirements"); + } + } else { + /* <= 26nm, reference: H27UBG8T2B datasheet */ + if (!ecc_level) { + chip->ecc_step_ds = 0; + chip->ecc_strength_ds = 0; + } else if (ecc_level < 5) { + chip->ecc_step_ds = 512; + chip->ecc_strength_ds = 1 << (ecc_level - 1); + } else { + chip->ecc_step_ds = 1024; + chip->ecc_strength_ds = 24 + + (8 * (ecc_level - 5)); + } + } + } +} + +static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip, + bool valid_jedecid) +{ + u8 nand_tech; + + /* We need scrambling on all TLC NANDs*/ + if (chip->bits_per_cell > 2) + chip->options |= NAND_NEED_SCRAMBLING; + + /* And on MLC NANDs with sub-3xnm process */ + if (valid_jedecid) { + nand_tech = chip->id.data[5] >> 4; + + /* < 3xnm */ + if (nand_tech > 0) + chip->options |= NAND_NEED_SCRAMBLING; + } else { + nand_tech = chip->id.data[5] & 0x3; + + /* < 32nm */ + if (nand_tech > 2) + chip->options |= NAND_NEED_SCRAMBLING; + } +} + +static void hynix_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + bool valid_jedecid; + u8 tmp; + + /* + * Exclude all SLC NANDs from this advanced detection scheme. + * According to the ranges defined in several datasheets, it might + * appear that even SLC NANDs could fall in this extended ID scheme. + * If that the case rework the test to let SLC NANDs go through the + * detection process. + */ + if (chip->id.len < 6 || nand_is_slc(chip)) { + nand_decode_ext_id(chip); + return; + } + + /* Extract pagesize */ + mtd->writesize = 2048 << (chip->id.data[3] & 0x03); + + tmp = (chip->id.data[3] >> 4) & 0x3; + /* + * When bit7 is set that means we start counting at 1MiB, otherwise + * we start counting at 128KiB and shift this value the content of + * ID[3][4:5]. + * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in + * this case the erasesize is set to 768KiB. + */ + if (chip->id.data[3] & 0x80) + mtd->erasesize = SZ_1M << tmp; + else if (tmp == 3) + mtd->erasesize = SZ_512K + SZ_256K; + else + mtd->erasesize = SZ_128K << tmp; + + /* + * Modern Toggle DDR NANDs have a valid JEDECID even though they are + * not exposing a valid JEDEC parameter table. + * These NANDs use a different NAND ID scheme. 
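
Editor's note: hynix_nand_decode_id() above derives the erase size from ID byte 3: bit 7 selects a 1 MiB base shifted by bits 5:4, otherwise the base is 128 KiB, except that bits 5:4 == 3 without bit 7 means the odd 768 KiB geometry. A standalone worked example using a made-up ID byte:

/* Standalone worked example (not kernel code); 0xb4 is hypothetical. */
#include <stdio.h>

#define SZ_128K	(128 * 1024)
#define SZ_256K	(256 * 1024)
#define SZ_512K	(512 * 1024)
#define SZ_1M	(1024 * 1024)

int main(void)
{
	unsigned char id3 = 0xb4;	/* hypothetical ID byte 3 */
	unsigned int tmp = (id3 >> 4) & 0x3;
	unsigned int erasesize;

	if (id3 & 0x80)
		erasesize = SZ_1M << tmp;	/* 0xb4: tmp=3 -> 8 MiB */
	else if (tmp == 3)
		erasesize = SZ_512K + SZ_256K;	/* the 768 KiB special case */
	else
		erasesize = SZ_128K << tmp;

	printf("erase size: %u KiB\n", erasesize / 1024);
	return 0;
}
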
+ */ + valid_jedecid = hynix_nand_has_valid_jedecid(chip); + + hynix_nand_extract_oobsize(chip, valid_jedecid); + hynix_nand_extract_ecc_requirements(chip, valid_jedecid); + hynix_nand_extract_scrambling_requirements(chip, valid_jedecid); +} + +static void hynix_nand_cleanup(struct nand_chip *chip) +{ + struct hynix_nand *hynix = nand_get_manufacturer_data(chip); + + if (!hynix) + return; + + kfree(hynix->read_retry); + kfree(hynix); + nand_set_manufacturer_data(chip, NULL); +} + +static int hynix_nand_init(struct nand_chip *chip) +{ + struct hynix_nand *hynix; + int ret; + + if (!nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCANLASTPAGE; + else + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + hynix = kzalloc(sizeof(*hynix), GFP_KERNEL); + if (!hynix) + return -ENOMEM; + + nand_set_manufacturer_data(chip, hynix); + + ret = hynix_nand_rr_init(chip); + if (ret) + hynix_nand_cleanup(chip); + + return ret; +} + +const struct nand_manufacturer_ops hynix_nand_manuf_ops = { + .detect = hynix_nand_decode_id, + .init = hynix_nand_init, + .cleanup = hynix_nand_cleanup, +}; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 4a2f75b0c200..9d5ca0e540b5 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -10,7 +10,7 @@ #include <linux/mtd/nand.h> #include <linux/sizes.h> -#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS +#define LP_OPTIONS 0 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) #define SP_OPTIONS NAND_NEED_READRDY @@ -169,29 +169,40 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -struct nand_manufacturers nand_manuf_ids[] = { - {NAND_MFR_TOSHIBA, "Toshiba"}, +static const struct nand_manufacturer nand_manufacturers[] = { + {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops}, {NAND_MFR_ESMT, "ESMT"}, - {NAND_MFR_SAMSUNG, "Samsung"}, + {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops}, {NAND_MFR_FUJITSU, "Fujitsu"}, {NAND_MFR_NATIONAL, "National"}, {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, - {NAND_MFR_HYNIX, "Hynix"}, - {NAND_MFR_MICRON, "Micron"}, - {NAND_MFR_AMD, "AMD/Spansion"}, - {NAND_MFR_MACRONIX, "Macronix"}, + {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops}, + {NAND_MFR_MICRON, "Micron", µn_nand_manuf_ops}, + {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, + {NAND_MFR_MACRONIX, "Macronix", ¯onix_nand_manuf_ops}, {NAND_MFR_EON, "Eon"}, {NAND_MFR_SANDISK, "SanDisk"}, {NAND_MFR_INTEL, "Intel"}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_WINBOND, "Winbond"}, - {0x0, "Unknown"} }; -EXPORT_SYMBOL(nand_manuf_ids); -EXPORT_SYMBOL(nand_flash_ids); +/** + * nand_get_manufacturer - Get manufacturer information from the manufacturer + * ID + * @id: manufacturer ID + * + * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * in the NAND manufacturers database, NULL otherwise. 
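
Editor's note: nand_ids.c now keeps the manufacturer table private and exposes lookups through nand_get_manufacturer(), documented above, instead of exporting the raw nand_manuf_ids array. A sketch of how the core consumes it, mirroring nand_detect() earlier in the diff; nand_manufacturer_name() is the helper used there to obtain a printable name even when the lookup returns NULL.

/* Illustrative sketch only; "example_report_manufacturer" is hypothetical. */
#include <linux/mtd/nand.h>
#include <linux/printk.h>

static void example_report_manufacturer(struct nand_chip *chip)
{
	const struct nand_manufacturer *manufacturer;

	manufacturer = nand_get_manufacturer(chip->id.data[0]);
	chip->manufacturer.desc = manufacturer;

	pr_info("NAND manufacturer: %s (0x%02x)\n",
		nand_manufacturer_name(manufacturer), chip->id.data[0]);
}
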
+ */ +const struct nand_manufacturer *nand_get_manufacturer(u8 id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) + if (nand_manufacturers[i].id == id) + return &nand_manufacturers[i]; -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); -MODULE_DESCRIPTION("Nand device & manufacturer IDs"); + return NULL; +} diff --git a/drivers/mtd/nand/nand_macronix.c b/drivers/mtd/nand/nand_macronix.c new file mode 100644 index 000000000000..84855c3e1a02 --- /dev/null +++ b/drivers/mtd/nand/nand_macronix.c @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/mtd/nand.h> + +static int macronix_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops macronix_nand_manuf_ops = { + .init = macronix_nand_init, +}; diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c new file mode 100644 index 000000000000..877011069251 --- /dev/null +++ b/drivers/mtd/nand/nand_micron.c @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/mtd/nand.h> + +struct nand_onfi_vendor_micron { + u8 two_plane_read; + u8 read_cache; + u8 read_unique_id; + u8 dq_imped; + u8 dq_imped_num_settings; + u8 dq_imped_feat_addr; + u8 rb_pulldown_strength; + u8 rb_pulldown_strength_feat_addr; + u8 rb_pulldown_strength_num_settings; + u8 otp_mode; + u8 otp_page_start; + u8 otp_data_prot_addr; + u8 otp_num_pages; + u8 otp_feat_addr; + u8 read_retry_options; + u8 reserved[72]; + u8 param_revision; +} __packed; + +static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; + + return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, + feature); +} + +/* + * Configure chip properties from Micron vendor-specific ONFI table + */ +static int micron_nand_onfi_init(struct nand_chip *chip) +{ + struct nand_onfi_params *p = &chip->onfi_params; + struct nand_onfi_vendor_micron *micron = (void *)p->vendor; + + if (!chip->onfi_version) + return 0; + + if (le16_to_cpu(p->vendor_revision) < 1) + return 0; + + chip->read_retries = micron->read_retry_options; + chip->setup_read_retry = micron_nand_setup_read_retry; + + return 0; +} + +static int micron_nand_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + ret = micron_nand_onfi_init(chip); + if (ret) + return ret; + + if (mtd->writesize == 2048) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops micron_nand_manuf_ops = { + .init = micron_nand_init, +}; diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c new file mode 100644 index 000000000000..9cfc4035a420 --- /dev/null +++ b/drivers/mtd/nand/nand_samsung.c @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/mtd/nand.h> + +static void samsung_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */ + if (chip->id.len == 6 && !nand_is_slc(chip) && + chip->id.data[5] != 0x00) { + u8 extid = chip->id.data[3]; + + /* Get pagesize */ + mtd->writesize = 2048 << (extid & 0x03); + + extid >>= 2; + + /* Get oobsize */ + switch (((extid >> 2) & 0x4) | (extid & 0x3)) { + case 1: + mtd->oobsize = 128; + break; + case 2: + mtd->oobsize = 218; + break; + case 3: + mtd->oobsize = 400; + break; + case 4: + mtd->oobsize = 436; + break; + case 5: + mtd->oobsize = 512; + break; + case 6: + mtd->oobsize = 640; + break; + default: + /* + * We should never reach this case, but if that + * happens, this probably means Samsung decided to use + * a different extended ID format, and we should find + * a way to support it. 
+ */ + WARN(1, "Invalid OOB size value"); + break; + } + + /* Get blocksize */ + extid >>= 2; + mtd->erasesize = (128 * 1024) << + (((extid >> 1) & 0x04) | (extid & 0x03)); + + /* Extract ECC requirements from 5th id byte*/ + extid = (chip->id.data[4] >> 4) & 0x07; + if (extid < 5) { + chip->ecc_step_ds = 512; + chip->ecc_strength_ds = 1 << extid; + } else { + chip->ecc_step_ds = 1024; + switch (extid) { + case 5: + chip->ecc_strength_ds = 24; + break; + case 6: + chip->ecc_strength_ds = 40; + break; + case 7: + chip->ecc_strength_ds = 60; + break; + } + } + } else { + nand_decode_ext_id(chip); + } +} + +static int samsung_nand_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + if (mtd->writesize > 512) + chip->options |= NAND_SAMSUNG_LP_OPTIONS; + + if (!nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCANLASTPAGE; + else + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops samsung_nand_manuf_ops = { + .detect = samsung_nand_decode_id, + .init = samsung_nand_init, +}; diff --git a/drivers/mtd/nand/nand_toshiba.c b/drivers/mtd/nand/nand_toshiba.c new file mode 100644 index 000000000000..fa787ba38dcd --- /dev/null +++ b/drivers/mtd/nand/nand_toshiba.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/mtd/nand.h> + +static void toshiba_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + nand_decode_ext_id(chip); + + /* + * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per + * 512B page. For Toshiba SLC, we decode the 5th/6th byte as + * follows: + * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, + * 110b -> 24nm + * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC + */ + if (chip->id.len >= 6 && nand_is_slc(chip) && + (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ && + !(chip->id.data[4] & 0x80) /* !BENAND */) + mtd->oobsize = 32 * mtd->writesize >> 9; +} + +static int toshiba_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops toshiba_nand_manuf_ops = { + .detect = toshiba_nand_decode_id, + .init = toshiba_nand_init, +}; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 092c9bd225be..03a0d057bf2f 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -902,7 +902,7 @@ static int parse_weakpages(void) zero_ok = (*w == '0' ? 
1 : 0); page_no = simple_strtoul(w, &w, 0); if (!zero_ok && !page_no) { - NS_ERR("invalid weakpagess.\n"); + NS_ERR("invalid weakpages.\n"); return -EINVAL; } max_writes = 3; diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 2a52101120d4..084934a9f19c 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1856,6 +1856,15 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.priv = NULL; nand_set_flash_node(nand_chip, dev->of_node); + if (!mtd->name) { + mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "omap2-nand.%d", info->gpmc_cs); + if (!mtd->name) { + dev_err(&pdev->dev, "Failed to set MTD name\n"); + return -ENOMEM; + } + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(nand_chip->IO_ADDR_R)) diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 4a91c5d000be..f8e463a97b9e 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -23,6 +23,11 @@ #include <asm/sizes.h> #include <linux/platform_data/mtd-orion_nand.h> +struct orion_nand_info { + struct nand_chip chip; + struct clk *clk; +}; + static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *nc = mtd_to_nand(mtd); @@ -75,20 +80,21 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) static int __init orion_nand_probe(struct platform_device *pdev) { + struct orion_nand_info *info; struct mtd_info *mtd; struct nand_chip *nc; struct orion_nand_data *board; struct resource *res; - struct clk *clk; void __iomem *io_base; int ret = 0; u32 val = 0; - nc = devm_kzalloc(&pdev->dev, - sizeof(struct nand_chip), + info = devm_kzalloc(&pdev->dev, + sizeof(struct orion_nand_info), GFP_KERNEL); - if (!nc) + if (!info) return -ENOMEM; + nc = &info->chip; mtd = nand_to_mtd(nc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -145,16 +151,23 @@ static int __init orion_nand_probe(struct platform_device *pdev) if (board->dev_ready) nc->dev_ready = board->dev_ready; - platform_set_drvdata(pdev, mtd); + platform_set_drvdata(pdev, info); /* Not all platforms can gate the clock, so it is not an error if the clock does not exists. 
*/ - clk = clk_get(&pdev->dev, NULL); - if (!IS_ERR(clk)) { - clk_prepare_enable(clk); - clk_put(clk); + info->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(info->clk)) { + ret = PTR_ERR(info->clk); + if (ret == -ENOENT) { + info->clk = NULL; + } else { + dev_err(&pdev->dev, "failed to get clock!\n"); + return ret; + } } + clk_prepare_enable(info->clk); + ret = nand_scan(mtd, 1); if (ret) goto no_dev; @@ -169,26 +182,19 @@ static int __init orion_nand_probe(struct platform_device *pdev) return 0; no_dev: - if (!IS_ERR(clk)) { - clk_disable_unprepare(clk); - clk_put(clk); - } - + clk_disable_unprepare(info->clk); return ret; } static int orion_nand_remove(struct platform_device *pdev) { - struct mtd_info *mtd = platform_get_drvdata(pdev); - struct clk *clk; + struct orion_nand_info *info = platform_get_drvdata(pdev); + struct nand_chip *chip = &info->chip; + struct mtd_info *mtd = nand_to_mtd(chip); nand_release(mtd); - clk = clk_get(&pdev->dev, NULL); - if (!IS_ERR(clk)) { - clk_disable_unprepare(clk); - clk_put(clk); - } + clk_disable_unprepare(info->clk); return 0; } diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c index 3e3bf3b364d2..1b207aac840c 100644 --- a/drivers/mtd/nand/oxnas_nand.c +++ b/drivers/mtd/nand/oxnas_nand.c @@ -91,7 +91,7 @@ static int oxnas_nand_probe(struct platform_device *pdev) int err = 0; /* Allocate memory for the device structure (and zero it) */ - oxnas = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), + oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), GFP_KERNEL); if (!oxnas) return -ENOMEM; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 0eeeb8b889ea..118a26fff368 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -2212,17 +2212,17 @@ static int sunxi_nfc_probe(struct platform_device *pdev) goto out_ahb_clk_unprepare; nfc->reset = devm_reset_control_get_optional(dev, "ahb"); - if (!IS_ERR(nfc->reset)) { - ret = reset_control_deassert(nfc->reset); - if (ret) { - dev_err(dev, "reset err %d\n", ret); - goto out_mod_clk_unprepare; - } - } else if (PTR_ERR(nfc->reset) != -ENOENT) { + if (IS_ERR(nfc->reset)) { ret = PTR_ERR(nfc->reset); goto out_mod_clk_unprepare; } + ret = reset_control_deassert(nfc->reset); + if (ret) { + dev_err(dev, "reset err %d\n", ret); + goto out_mod_clk_unprepare; + } + ret = sunxi_nfc_rst(nfc); if (ret) goto out_ahb_reset_reassert; @@ -2262,8 +2262,7 @@ out_release_dmac: if (nfc->dmac) dma_release_channel(nfc->dmac); out_ahb_reset_reassert: - if (!IS_ERR(nfc->reset)) - reset_control_assert(nfc->reset); + reset_control_assert(nfc->reset); out_mod_clk_unprepare: clk_disable_unprepare(nfc->mod_clk); out_ahb_clk_unprepare: @@ -2278,8 +2277,7 @@ static int sunxi_nfc_remove(struct platform_device *pdev) sunxi_nand_chips_cleanup(nfc); - if (!IS_ERR(nfc->reset)) - reset_control_assert(nfc->reset); + reset_control_assert(nfc->reset); if (nfc->dmac) dma_release_channel(nfc->dmac); diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 4a5e948c62df..05b6e1065203 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -223,12 +223,13 @@ static void tango_dma_callback(void *arg) complete(arg); } -static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf, - int len, int page) +static int do_dma(struct tango_nfc *nfc, enum dma_data_direction dir, int cmd, + const void *buf, int len, int page) { void __iomem *addr = nfc->reg_base + NFC_STATUS; struct dma_chan *chan = nfc->chan; struct 
dma_async_tx_descriptor *desc; + enum dma_transfer_direction tdir; struct scatterlist sg; struct completion tx_done; int err = -EIO; @@ -238,7 +239,8 @@ static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf, if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1) return -EIO; - desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT); + tdir = dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, &sg, 1, tdir, DMA_PREP_INTERRUPT); if (!desc) goto dma_unmap; diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 464470122493..2861c7079d7b 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -166,8 +166,8 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, if (!part) return 0; /* No partitions found */ - pr_warning("Device tree uses obsolete partition map binding: %s\n", - dp->full_name); + pr_warn("Device tree uses obsolete partition map binding: %s\n", + dp->full_name); nr_parts = plen / sizeof(part[0]); diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 7252087ef407..bfdfb1e72b38 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -106,4 +106,11 @@ config SPI_INTEL_SPI_PLATFORM To compile this driver as a module, choose M here: the module will be called intel-spi-platform. +config SPI_STM32_QUADSPI + tristate "STM32 Quad SPI controller" + depends on ARCH_STM32 + help + This enables support for the STM32 Quad SPI controller. + We only connect the NOR to this controller. + endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 72238a793198..285aab86c7ca 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o +obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o
\ No newline at end of file diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index 20378b0d55e9..a286350627a6 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -448,8 +448,11 @@ static int hisi_spi_nor_probe(struct platform_device *pdev) if (!host->buffer) return -ENOMEM; + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; + mutex_init(&host->lock); - clk_prepare_enable(host->clk); hisi_spi_nor_init(host); ret = hisi_spi_nor_register_all(host); if (ret) diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index a10f6027b386..986a3d020a3a 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -704,7 +704,7 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, * whole partition read-only to be on the safe side. */ if (intel_spi_is_protected(ispi, base, limit)) - ispi->writeable = 0; + ispi->writeable = false; end = (limit << 12) + 4096; if (end > part->size) @@ -728,7 +728,7 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->base = devm_ioremap_resource(dev, mem); if (IS_ERR(ispi->base)) - return ispi->base; + return ERR_CAST(ispi->base); ispi->dev = dev; ispi->info = info; diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index e661877c23de..b6377707ce32 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -104,6 +104,8 @@ #define MTK_NOR_MAX_RX_TX_SHIFT 6 /* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */ #define MTK_NOR_MAX_SHIFT 7 +/* nor controller 4-byte address mode enable bit */ +#define MTK_NOR_4B_ADDR_EN BIT(4) /* Helpers for accessing the program data / shift data registers */ #define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n)) @@ -230,10 +232,35 @@ static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor) 10000); } +static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor) +{ + u8 val; + struct spi_nor *nor = &mt8173_nor->nor; + + val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG); + + switch (nor->addr_width) { + case 3: + val &= ~MTK_NOR_4B_ADDR_EN; + break; + case 4: + val |= MTK_NOR_4B_ADDR_EN; + break; + default: + dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n", + nor->addr_width); + break; + } + + writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG); +} + static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr) { int i; + mt8173_nor_set_addr_width(mt8173_nor); + for (i = 0; i < 3; i++) { writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4); addr >>= 8; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 747645c74134..dea8c9cbadf0 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -85,6 +85,7 @@ struct flash_info { * Use dedicated 4byte address op codes * to support memory size above 128Mib. 
*/ +#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ }; #define JEDEC_MFR(info) ((info)->id[0]) @@ -960,6 +961,8 @@ static const struct flash_info spi_nor_ids[] = { /* ESMT */ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, /* Everspin */ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, @@ -1013,11 +1016,14 @@ static const struct flash_info spi_nor_ids[] = { { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, + { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, - { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) }, + { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, @@ -1031,10 +1037,11 @@ static const struct flash_info spi_nor_ids[] = { { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, - { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, - { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, /* PMC */ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, @@ -1128,6 +1135,9 @@ static const struct flash_info spi_nor_ids[] = { { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, @@ -1629,6 +1639,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) nor->flags |= SNOR_F_USE_FSR; if (info->flags & SPI_NOR_HAS_TB) nor->flags |= SNOR_F_HAS_SR_TB; + if (info->flags & NO_CHIP_ERASE) + nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS /* prefer "small sector" erase if possible */ diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c 
b/drivers/mtd/spi-nor/stm32-quadspi.c new file mode 100644 index 000000000000..ae45f81b8cd3 --- /dev/null +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -0,0 +1,693 @@ +/* + * stm32_quadspi.c + * + * Copyright (C) 2017, Ludovic Barre + * + * License terms: GNU General Public License (GPL), version 2 + */ +#include <linux/clk.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#define QUADSPI_CR 0x00 +#define CR_EN BIT(0) +#define CR_ABORT BIT(1) +#define CR_DMAEN BIT(2) +#define CR_TCEN BIT(3) +#define CR_SSHIFT BIT(4) +#define CR_DFM BIT(6) +#define CR_FSEL BIT(7) +#define CR_FTHRES_SHIFT 8 +#define CR_FTHRES_MASK GENMASK(12, 8) +#define CR_FTHRES(n) (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK) +#define CR_TEIE BIT(16) +#define CR_TCIE BIT(17) +#define CR_FTIE BIT(18) +#define CR_SMIE BIT(19) +#define CR_TOIE BIT(20) +#define CR_PRESC_SHIFT 24 +#define CR_PRESC_MASK GENMASK(31, 24) +#define CR_PRESC(n) (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK) + +#define QUADSPI_DCR 0x04 +#define DCR_CSHT_SHIFT 8 +#define DCR_CSHT_MASK GENMASK(10, 8) +#define DCR_CSHT(n) (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK) +#define DCR_FSIZE_SHIFT 16 +#define DCR_FSIZE_MASK GENMASK(20, 16) +#define DCR_FSIZE(n) (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK) + +#define QUADSPI_SR 0x08 +#define SR_TEF BIT(0) +#define SR_TCF BIT(1) +#define SR_FTF BIT(2) +#define SR_SMF BIT(3) +#define SR_TOF BIT(4) +#define SR_BUSY BIT(5) +#define SR_FLEVEL_SHIFT 8 +#define SR_FLEVEL_MASK GENMASK(13, 8) + +#define QUADSPI_FCR 0x0c +#define FCR_CTCF BIT(1) + +#define QUADSPI_DLR 0x10 + +#define QUADSPI_CCR 0x14 +#define CCR_INST_SHIFT 0 +#define CCR_INST_MASK GENMASK(7, 0) +#define CCR_INST(n) (((n) << CCR_INST_SHIFT) & CCR_INST_MASK) +#define CCR_IMODE_NONE (0U << 8) +#define CCR_IMODE_1 (1U << 8) +#define CCR_IMODE_2 (2U << 8) +#define CCR_IMODE_4 (3U << 8) +#define CCR_ADMODE_NONE (0U << 10) +#define CCR_ADMODE_1 (1U << 10) +#define CCR_ADMODE_2 (2U << 10) +#define CCR_ADMODE_4 (3U << 10) +#define CCR_ADSIZE_SHIFT 12 +#define CCR_ADSIZE_MASK GENMASK(13, 12) +#define CCR_ADSIZE(n) (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK) +#define CCR_ABMODE_NONE (0U << 14) +#define CCR_ABMODE_1 (1U << 14) +#define CCR_ABMODE_2 (2U << 14) +#define CCR_ABMODE_4 (3U << 14) +#define CCR_ABSIZE_8 (0U << 16) +#define CCR_ABSIZE_16 (1U << 16) +#define CCR_ABSIZE_24 (2U << 16) +#define CCR_ABSIZE_32 (3U << 16) +#define CCR_DCYC_SHIFT 18 +#define CCR_DCYC_MASK GENMASK(22, 18) +#define CCR_DCYC(n) (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK) +#define CCR_DMODE_NONE (0U << 24) +#define CCR_DMODE_1 (1U << 24) +#define CCR_DMODE_2 (2U << 24) +#define CCR_DMODE_4 (3U << 24) +#define CCR_FMODE_INDW (0U << 26) +#define CCR_FMODE_INDR (1U << 26) +#define CCR_FMODE_APM (2U << 26) +#define CCR_FMODE_MM (3U << 26) + +#define QUADSPI_AR 0x18 +#define QUADSPI_ABR 0x1c +#define QUADSPI_DR 0x20 +#define QUADSPI_PSMKR 0x24 +#define QUADSPI_PSMAR 0x28 +#define QUADSPI_PIR 0x2c +#define QUADSPI_LPTR 0x30 +#define LPTR_DFT_TIMEOUT 0x10 + +#define FSIZE_VAL(size) (__fls(size) - 1) + +#define STM32_MAX_MMAP_SZ SZ_256M +#define STM32_MAX_NORCHIP 2 + +#define STM32_QSPI_FIFO_TIMEOUT_US 30000 +#define STM32_QSPI_BUSY_TIMEOUT_US 100000 + +struct stm32_qspi_flash { + 
struct spi_nor nor; + struct stm32_qspi *qspi; + u32 cs; + u32 fsize; + u32 presc; + u32 read_mode; + bool registered; +}; + +struct stm32_qspi { + struct device *dev; + void __iomem *io_base; + void __iomem *mm_base; + resource_size_t mm_size; + u32 nor_num; + struct clk *clk; + u32 clk_rate; + struct stm32_qspi_flash flash[STM32_MAX_NORCHIP]; + struct completion cmd_completion; + + /* + * to protect device configuration, could be different between + * 2 flash access (bk1, bk2) + */ + struct mutex lock; +}; + +struct stm32_qspi_cmd { + u8 addr_width; + u8 dummy; + bool tx_data; + u8 opcode; + u32 framemode; + u32 qspimode; + u32 addr; + size_t len; + void *buf; +}; + +static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi) +{ + u32 cr; + int err = 0; + + if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF) + return 0; + + reinit_completion(&qspi->cmd_completion); + cr = readl_relaxed(qspi->io_base + QUADSPI_CR); + writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR); + + if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion, + msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + writel_relaxed(cr, qspi->io_base + QUADSPI_CR); + return err; +} + +static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi) +{ + u32 sr; + + return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr, + !(sr & SR_BUSY), 10, + STM32_QSPI_BUSY_TIMEOUT_US); +} + +static void stm32_qspi_set_framemode(struct spi_nor *nor, + struct stm32_qspi_cmd *cmd, bool read) +{ + u32 dmode = CCR_DMODE_1; + + cmd->framemode = CCR_IMODE_1; + + if (read) { + switch (nor->flash_read) { + case SPI_NOR_NORMAL: + case SPI_NOR_FAST: + dmode = CCR_DMODE_1; + break; + case SPI_NOR_DUAL: + dmode = CCR_DMODE_2; + break; + case SPI_NOR_QUAD: + dmode = CCR_DMODE_4; + break; + } + } + + cmd->framemode |= cmd->tx_data ? dmode : 0; + cmd->framemode |= cmd->addr_width ? 
CCR_ADMODE_1 : 0; +} + +static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr) +{ + *val = readb_relaxed(addr); +} + +static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr) +{ + writeb_relaxed(*val, addr); +} + +static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, + const struct stm32_qspi_cmd *cmd) +{ + void (*tx_fifo)(u8 *, void __iomem *); + u32 len = cmd->len, sr; + u8 *buf = cmd->buf; + int ret; + + if (cmd->qspimode == CCR_FMODE_INDW) + tx_fifo = stm32_qspi_write_fifo; + else + tx_fifo = stm32_qspi_read_fifo; + + while (len--) { + ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, + sr, (sr & SR_FTF), 10, + STM32_QSPI_FIFO_TIMEOUT_US); + if (ret) { + dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); + break; + } + tx_fifo(buf++, qspi->io_base + QUADSPI_DR); + } + + return ret; +} + +static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, + const struct stm32_qspi_cmd *cmd) +{ + memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len); + return 0; +} + +static int stm32_qspi_tx(struct stm32_qspi *qspi, + const struct stm32_qspi_cmd *cmd) +{ + if (!cmd->tx_data) + return 0; + + if (cmd->qspimode == CCR_FMODE_MM) + return stm32_qspi_tx_mm(qspi, cmd); + + return stm32_qspi_tx_poll(qspi, cmd); +} + +static int stm32_qspi_send(struct stm32_qspi_flash *flash, + const struct stm32_qspi_cmd *cmd) +{ + struct stm32_qspi *qspi = flash->qspi; + u32 ccr, dcr, cr; + int err; + + err = stm32_qspi_wait_nobusy(qspi); + if (err) + goto abort; + + dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK; + dcr |= DCR_FSIZE(flash->fsize); + writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR); + + cr = readl_relaxed(qspi->io_base + QUADSPI_CR); + cr &= ~CR_PRESC_MASK & ~CR_FSEL; + cr |= CR_PRESC(flash->presc); + cr |= flash->cs ? 
CR_FSEL : 0; + writel_relaxed(cr, qspi->io_base + QUADSPI_CR); + + if (cmd->tx_data) + writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR); + + ccr = cmd->framemode | cmd->qspimode; + + if (cmd->dummy) + ccr |= CCR_DCYC(cmd->dummy); + + if (cmd->addr_width) + ccr |= CCR_ADSIZE(cmd->addr_width - 1); + + ccr |= CCR_INST(cmd->opcode); + writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR); + + if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM) + writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR); + + err = stm32_qspi_tx(qspi, cmd); + if (err) + goto abort; + + if (cmd->qspimode != CCR_FMODE_MM) { + err = stm32_qspi_wait_cmd(qspi); + if (err) + goto abort; + writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); + } + + return err; + +abort: + cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; + writel_relaxed(cr, qspi->io_base + QUADSPI_CR); + + dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); + return err; +} + +static int stm32_qspi_read_reg(struct spi_nor *nor, + u8 opcode, u8 *buf, int len) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct stm32_qspi_cmd cmd; + + dev_dbg(dev, "read_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = opcode; + cmd.tx_data = true; + cmd.len = len; + cmd.buf = buf; + cmd.qspimode = CCR_FMODE_INDR; + + stm32_qspi_set_framemode(nor, &cmd, false); + + return stm32_qspi_send(flash, &cmd); +} + +static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct stm32_qspi_cmd cmd; + + dev_dbg(dev, "write_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = opcode; + cmd.tx_data = !!(buf && len > 0); + cmd.len = len; + cmd.buf = buf; + cmd.qspimode = CCR_FMODE_INDW; + + stm32_qspi_set_framemode(nor, &cmd, false); + + return stm32_qspi_send(flash, &cmd); +} + +static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct stm32_qspi *qspi = flash->qspi; + struct stm32_qspi_cmd cmd; + int err; + + dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#x\n", + nor->read_opcode, buf, (u32)from, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = nor->read_opcode; + cmd.addr_width = nor->addr_width; + cmd.addr = (u32)from; + cmd.tx_data = true; + cmd.dummy = nor->read_dummy; + cmd.len = len; + cmd.buf = buf; + cmd.qspimode = flash->read_mode; + + stm32_qspi_set_framemode(nor, &cmd, true); + err = stm32_qspi_send(flash, &cmd); + + return err ? err : len; +} + +static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct stm32_qspi_cmd cmd; + int err; + + dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#x\n", + nor->program_opcode, buf, (u32)to, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = nor->program_opcode; + cmd.addr_width = nor->addr_width; + cmd.addr = (u32)to; + cmd.tx_data = true; + cmd.len = len; + cmd.buf = (void *)buf; + cmd.qspimode = CCR_FMODE_INDW; + + stm32_qspi_set_framemode(nor, &cmd, false); + err = stm32_qspi_send(flash, &cmd); + + return err ? 
err : len; +} + +static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct stm32_qspi_cmd cmd; + + dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = nor->erase_opcode; + cmd.addr_width = nor->addr_width; + cmd.addr = (u32)offs; + cmd.qspimode = CCR_FMODE_INDW; + + stm32_qspi_set_framemode(nor, &cmd, false); + + return stm32_qspi_send(flash, &cmd); +} + +static irqreturn_t stm32_qspi_irq(int irq, void *dev_id) +{ + struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id; + u32 cr, sr, fcr = 0; + + cr = readl_relaxed(qspi->io_base + QUADSPI_CR); + sr = readl_relaxed(qspi->io_base + QUADSPI_SR); + + if ((cr & CR_TCIE) && (sr & SR_TCF)) { + /* tx complete */ + fcr |= FCR_CTCF; + complete(&qspi->cmd_completion); + } else { + dev_info_ratelimited(qspi->dev, "spurious interrupt\n"); + } + + writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR); + + return IRQ_HANDLED; +} + +static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct stm32_qspi *qspi = flash->qspi; + + mutex_lock(&qspi->lock); + return 0; +} + +static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct stm32_qspi_flash *flash = nor->priv; + struct stm32_qspi *qspi = flash->qspi; + + mutex_unlock(&qspi->lock); +} + +static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, + struct device_node *np) +{ + u32 width, flash_read, presc, cs_num, max_rate = 0; + struct stm32_qspi_flash *flash; + struct mtd_info *mtd; + int ret; + + of_property_read_u32(np, "reg", &cs_num); + if (cs_num >= STM32_MAX_NORCHIP) + return -EINVAL; + + of_property_read_u32(np, "spi-max-frequency", &max_rate); + if (!max_rate) + return -EINVAL; + + presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1; + + if (of_property_read_u32(np, "spi-rx-bus-width", &width)) + width = 1; + + if (width == 4) + flash_read = SPI_NOR_QUAD; + else if (width == 2) + flash_read = SPI_NOR_DUAL; + else if (width == 1) + flash_read = SPI_NOR_NORMAL; + else + return -EINVAL; + + flash = &qspi->flash[cs_num]; + flash->qspi = qspi; + flash->cs = cs_num; + flash->presc = presc; + + flash->nor.dev = qspi->dev; + spi_nor_set_flash_node(&flash->nor, np); + flash->nor.priv = flash; + mtd = &flash->nor.mtd; + + flash->nor.read = stm32_qspi_read; + flash->nor.write = stm32_qspi_write; + flash->nor.erase = stm32_qspi_erase; + flash->nor.read_reg = stm32_qspi_read_reg; + flash->nor.write_reg = stm32_qspi_write_reg; + flash->nor.prepare = stm32_qspi_prep; + flash->nor.unprepare = stm32_qspi_unprep; + + writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR); + + writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT + | CR_EN, qspi->io_base + QUADSPI_CR); + + /* + * in stm32 qspi controller, QUADSPI_DCR register has a fsize field + * which define the size of nor flash. + * if fsize is NULL, the controller can't sent spi-nor command. + * set a temporary value just to discover the nor flash with + * "spi_nor_scan". After, the right value (mtd->size) can be set. 
+ */ + flash->fsize = FSIZE_VAL(SZ_1K); + + ret = spi_nor_scan(&flash->nor, NULL, flash_read); + if (ret) { + dev_err(qspi->dev, "device scan failed\n"); + return ret; + } + + flash->fsize = FSIZE_VAL(mtd->size); + + flash->read_mode = CCR_FMODE_MM; + if (mtd->size > qspi->mm_size) + flash->read_mode = CCR_FMODE_INDR; + + writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR); + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(qspi->dev, "mtd device parse failed\n"); + return ret; + } + + flash->registered = true; + + dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n", + flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width); + + return 0; +} + +static void stm32_qspi_mtd_free(struct stm32_qspi *qspi) +{ + int i; + + for (i = 0; i < STM32_MAX_NORCHIP; i++) + if (qspi->flash[i].registered) + mtd_device_unregister(&qspi->flash[i].nor.mtd); +} + +static int stm32_qspi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *flash_np; + struct reset_control *rstc; + struct stm32_qspi *qspi; + struct resource *res; + int ret, irq; + + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); + if (!qspi) + return -ENOMEM; + + qspi->nor_num = of_get_child_count(dev->of_node); + if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP) + return -ENODEV; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); + qspi->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->io_base)) + return PTR_ERR(qspi->io_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); + qspi->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->mm_base)) + return PTR_ERR(qspi->mm_base); + + qspi->mm_size = resource_size(res); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, + dev_name(dev), qspi); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + init_completion(&qspi->cmd_completion); + + qspi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); + + qspi->clk_rate = clk_get_rate(qspi->clk); + if (!qspi->clk_rate) + return -EINVAL; + + ret = clk_prepare_enable(qspi->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + rstc = devm_reset_control_get(dev, NULL); + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + udelay(2); + reset_control_deassert(rstc); + } + + qspi->dev = dev; + platform_set_drvdata(pdev, qspi); + mutex_init(&qspi->lock); + + for_each_available_child_of_node(dev->of_node, flash_np) { + ret = stm32_qspi_flash_setup(qspi, flash_np); + if (ret) { + dev_err(dev, "unable to setup flash chip\n"); + goto err_flash; + } + } + + return 0; + +err_flash: + mutex_destroy(&qspi->lock); + stm32_qspi_mtd_free(qspi); + + clk_disable_unprepare(qspi->clk); + return ret; +} + +static int stm32_qspi_remove(struct platform_device *pdev) +{ + struct stm32_qspi *qspi = platform_get_drvdata(pdev); + + /* disable qspi */ + writel_relaxed(0, qspi->io_base + QUADSPI_CR); + + stm32_qspi_mtd_free(qspi); + mutex_destroy(&qspi->lock); + + clk_disable_unprepare(qspi->clk); + return 0; +} + +static const struct of_device_id stm32_qspi_match[] = { + {.compatible = "st,stm32f469-qspi"}, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_qspi_match); + +static struct platform_driver stm32_qspi_driver = { + .probe = stm32_qspi_probe, + .remove = stm32_qspi_remove, + .driver = { + .name = "stm32-quadspi", + .of_match_table = stm32_qspi_match, + }, +}; +module_platform_driver(stm32_qspi_driver); + 
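The dummy-then-real FSIZE dance in stm32_qspi_flash_setup() above is easier to follow with a worked example. The sketch below is illustrative only and not part of the patch; it assumes, as the driver's FSIZE_VAL() macro implies, that the FSIZE field of QUADSPI_DCR holds log2(size) - 1, i.e. that the controller decodes 2^(FSIZE + 1) addressable bytes. fls_log2() is a user-space stand-in for the kernel's __fls().

#include <stdio.h>

/* user-space stand-in for the kernel's __fls(): bit index of the MSB set */
static unsigned int fls_log2(unsigned long size)
{
	unsigned int msb = 0;

	while (size >>= 1)
		msb++;
	return msb;
}

/* mirrors FSIZE_VAL(size), i.e. __fls(size) - 1, from the driver above */
static unsigned int fsize_val(unsigned long size)
{
	return fls_log2(size) - 1;
}

int main(void)
{
	/* 1 KiB dummy window programmed before spi_nor_scan() runs */
	printf("FSIZE_VAL(1 KiB)  = %u\n", fsize_val(1024UL));     /* 9  */
	/* e.g. a 16 MiB NOR reported back in mtd->size afterwards */
	printf("FSIZE_VAL(16 MiB) = %u\n", fsize_val(16UL << 20)); /* 23 */
	return 0;
}

With FSIZE = 9 the controller exposes just 2^10 bytes, enough for the identification commands issued by spi_nor_scan(); once mtd->size is known, the real value (23 for a 16 MiB part) is written back, which is what the comment in stm32_qspi_flash_setup() describes.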
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 1b2e9217ec78..486e1e6997fc 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -986,9 +986,9 @@ static int cops_close(struct net_device *dev) static struct net_device *cops_dev; MODULE_LICENSE("GPL"); -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(board_type, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); +module_param_hw(board_type, int, other, 0); static int __init cops_module_init(void) { diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 01e2ac55c137..ac755d2950a6 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1231,9 +1231,9 @@ static struct net_device *dev_ltpc; MODULE_LICENSE("GPL"); module_param(debug, int, 0); -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(dma, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); +module_param_hw(dma, int, dma, 0); static int __init ltpc_module_init(void) diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index b9e9931353b2..38fa60ddaf2e 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -129,8 +129,8 @@ static int clockp = 0; static int clockm = 0; module_param(node, int, 0); -module_param(io, int, 0); -module_param(irq, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); module_param_string(device, device, sizeof(device), 0); module_param(timeout, int, 0); module_param(backplane, int, 0); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index b57863df5bf5..4e56aaf2b984 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -347,8 +347,8 @@ static int io; /* use the insmod io= irq= shmem= options */ static int irq; static char device[9]; /* use eg. device=arc1 to change name */ -module_param(io, int, 0); -module_param(irq, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); module_param_string(device, device, sizeof(device), 0); MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 81f90c4703ae..ca4a57c30bf8 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -88,8 +88,8 @@ static int irq; static int shmem; static char device[9]; /* use eg. 
device=arc1 to change name */ -module_param(io, int, 0); -module_param(irq, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); module_param(shmem, int, 0); module_param_string(device, device, sizeof(device), 0); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index c502c139d3bc..47a8103610bc 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -549,7 +549,8 @@ static int bond_fill_info(struct sk_buff *skb, targets_added = 0; for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { if (bond->params.arp_targets[i]) { - nla_put_be32(skb, i, bond->params.arp_targets[i]); + if (nla_put_be32(skb, i, bond->params.arp_targets[i])) + goto nla_put_failure; targets_added = 1; } } diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index bc0eb47eccee..6122768c8644 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -679,8 +679,7 @@ static int cfv_probe(struct virtio_device *vdev) goto err; /* Get the TX virtio ring. This is a "guest side vring". */ - err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, - NULL); + err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL); if (err) goto err; diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index e0d15711e9ac..3a30fd3b4498 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -82,16 +82,16 @@ static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; -module_param_array(port, ulong, NULL, S_IRUGO); +module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); -module_param_array(mem, ulong, NULL, S_IRUGO); +module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); -module_param_array(indirect, int, NULL, S_IRUGO); +module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); -module_param_array(irq, int, NULL, S_IRUGO); +module_param_hw_array(irq, int, irq, NULL, S_IRUGO); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, S_IRUGO); diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index e97e6d35b300..a89c1e92554d 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -48,16 +48,16 @@ static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */ -module_param_array(port, ulong, NULL, S_IRUGO); +module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); -module_param_array(mem, ulong, NULL, S_IRUGO); +module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); -module_param_array(indirect, int, NULL, S_IRUGO); +module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); -module_param_array(irq, int, NULL, S_IRUGO); +module_param_hw_array(irq, int, irq, NULL, S_IRUGO); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, S_IRUGO); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index f0fc4de4fc9a..a19e1781e9bb 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -256,6 +256,9 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) return -ENOMEM; ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + ps->netdev = dev_get_by_name(&init_net, pdata->netdev); if (!ps->netdev) return -EPROBE_DEFER; diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index c7f9f2c77da7..db8592d412ab 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1371,7 +1371,7 @@ el3_resume(struct device *pdev) #endif /* CONFIG_PM */ module_param(debug,int, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param(max_interrupt_work, int, 0); MODULE_PARM_DESC(debug, "debug level (0-6)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 40196f41768a..e41245a54f8b 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -813,8 +813,8 @@ module_param(global_enable_wol, int, 0); module_param_array(enable_wol, int, NULL, 0); module_param(rx_copybreak, int, 0); module_param(max_interrupt_work, int, 0); -module_param(compaq_ioaddr, int, 0); -module_param(compaq_irq, int, 0); +module_param_hw(compaq_ioaddr, int, ioport, 0); +module_param_hw(compaq_irq, int, irq, 0); module_param(compaq_device_id, int, 0); module_param(watchdog, int, 0); module_param(global_use_mmio, int, 0); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index c063b410a163..66f47987e2a2 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -74,8 +74,8 @@ static int bad[MAX_NE_CARDS]; static u32 ne_msg_enable; #ifdef MODULE -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(bad, int, NULL, 0); module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); MODULE_PARM_DESC(io, "I/O base address(es),required"); diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 364b6514f65f..4e02f6a23575 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -561,8 +561,8 @@ static struct net_device *dev_ultra[MAX_ULTRA_CARDS]; static int io[MAX_ULTRA_CARDS]; static int irq[MAX_ULTRA_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); 
+module_param_hw_array(irq, int, irq, NULL, 0); module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index ad019cbc698f..6efa2722f850 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -503,10 +503,10 @@ static int irq[MAX_WD_CARDS]; static int mem[MAX_WD_CARDS]; static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */ -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param_array(mem_end, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); +module_param_hw_array(mem, int, iomem, NULL, 0); +module_param_hw_array(mem_end, int, iomem, NULL, 0); module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)"); diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 61a641f23149..12a6a93d221b 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -318,9 +318,9 @@ static int io[MAX_CARDS]; static int dma[MAX_CARDS]; static int irq[MAX_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(dma, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(dma, int, dma, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param(lance_debug, int, 0); MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required"); MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 5985bf220a8d..e248d1ab3e47 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1227,9 +1227,9 @@ static void set_multicast_list(struct net_device *dev) #ifdef MODULE static struct net_device *dev_ni65; -module_param(irq, int, 0); -module_param(io, int, 0); -module_param(dma, int, 0); +module_param_hw(irq, int, irq, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(dma, int, dma, 0); MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index cdb02991f249..9ee1c5016784 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -755,7 +755,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) count = 0U; for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { data += count; aq_vec_get_sw_stats(aq_vec, data, &count); } @@ -959,8 +959,10 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self) goto err_exit; for (i = AQ_DIMOF(self->aq_vec); i--;) { - if (self->aq_vec[i]) + if (self->aq_vec[i]) { aq_vec_free(self->aq_vec[i]); + self->aq_vec[i] = NULL; + } } err_exit:; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index a8c2db881b75..567ee54504bc 100644 --- 
a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -838,7 +838,7 @@ static int alx_enable_msix(struct alx_priv *alx) err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec, PCI_IRQ_MSIX); - if (err) { + if (err < 0) { netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n"); return err; } @@ -904,7 +904,7 @@ static int alx_init_intr(struct alx_priv *alx) ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (ret) + if (ret < 0) return ret; alx->num_vec = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b3ba66032980..b56c54d68d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3000,7 +3000,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); bp->ntp_fltr_count = 0; - bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), + bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), + sizeof(long), GFP_KERNEL); if (!bp->ntp_fltr_bmap) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 0f6811860ad5..a36e38676640 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } static void diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 286593922139..31032de5843b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -547,8 +547,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN)); - memcpy(string, bnad_net_stats_strings[i], - ETH_GSTRING_LEN); + strncpy(string, bnad_net_stats_strings[i], + ETH_GSTRING_LEN); string += ETH_GSTRING_LEN; } bmap = bna_tx_rid_mask(&bnad->bna); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1d2be2dd19dd..e88c1808e46f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -108,6 +108,12 @@ enum { PAUSE_AUTONEG = 1 << 2 }; +enum { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ +}; + struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ @@ -432,6 +438,9 @@ struct link_config { unsigned int speed; /* actual link speed */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow control */ + unsigned char auto_fec; /* Forward Error Correction: */ + unsigned char requested_fec; /* "automatic" (IEEE 802.3), */ + unsigned char fec; /* requested, and actual in use */ unsigned char autoneg; /* autonegotiating? */ unsigned char link_ok; /* link up? 
*/ unsigned char link_down_rc; /* link down reason */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 0de8eb72325c..aded42b96f6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3707,7 +3707,8 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); + unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); + unsigned int fc = 0, fec = 0, fw_fec = 0; lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) @@ -3715,6 +3716,13 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, if (lc->requested_fc & PAUSE_TX) fc |= FW_PORT_CAP_FC_TX; + fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec; + + if (fec & FEC_RS) + fw_fec |= FW_PORT_CAP_FEC_RS; + if (fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + memset(&c, 0, sizeof(c)); c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_EXEC_F | @@ -3725,13 +3733,15 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, if (!(lc->supported & FW_PORT_CAP_ANEG)) { c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc); + fc | fw_fec); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi); + c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | + fw_fec | mdi); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi); + c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | + fw_fec | mdi); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -7407,13 +7417,26 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int caps) +static void init_link_config(struct link_config *lc, unsigned int pcaps, + unsigned int acaps) { - lc->supported = caps; + lc->supported = pcaps; lc->lp_advertising = 0; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + lc->auto_fec = 0; + + /* For Forward Error Control, we default to whatever the Firmware + * tells us the Link is currently advertising. 
+ */ + if (acaps & FW_PORT_CAP_FEC_RS) + lc->auto_fec |= FEC_RS; + if (acaps & FW_PORT_CAP_FEC_BASER_RS) + lc->auto_fec |= FEC_BASER_RS; + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + if (lc->supported & FW_PORT_CAP_ANEG) { lc->advertising = lc->supported & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; @@ -7991,7 +8014,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox, pi->port_type = FW_PORT_CMD_PTYPE_G(ret); pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap)); + init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap), + be16_to_cpu(c.u.info.acap)); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 8f8c079d0d2b..251a35e9795c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2263,9 +2263,9 @@ enum fw_port_cap { FW_PORT_CAP_ANEG = 0x0100, FW_PORT_CAP_MDIX = 0x0200, FW_PORT_CAP_MDIAUTO = 0x0400, - FW_PORT_CAP_FEC = 0x0800, - FW_PORT_CAP_TECHKR = 0x1000, - FW_PORT_CAP_TECHKX4 = 0x2000, + FW_PORT_CAP_FEC_RS = 0x0800, + FW_PORT_CAP_FEC_BASER_RS = 0x1000, + FW_PORT_CAP_FEC_RESERVED = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 47384f7323ac..da5b58b853e2 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1704,12 +1704,12 @@ static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */ static int dma; static int dmasize = 16; /* or 64 */ -module_param(io, int, 0); -module_param(irq, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); module_param(debug, int, 0); module_param_string(media, media, sizeof(media), 0); module_param(duplex, int, 0); -module_param(dma , int, 0); +module_param_hw(dma , int, dma, 0); module_param(dmasize , int, 0); module_param(use_dma , int, 0); MODULE_PARM_DESC(io, "cs89x0 I/O base address"); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index df4a871df633..fd6bcf024729 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1015,7 +1015,7 @@ static int compact_infoblock(struct net_device *dev, u_char count, u_char *p static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */ -module_param(io, int, 0); +module_param_hw(io, int, ioport, 0); module_param(de4x5_debug, int, 0); module_param(dec_only, int, 0); module_param(args, charp, 0); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 1a31bee6e728..5673b071e39d 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2966,7 +2966,7 @@ MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adap #define HP100_DEVICES 5 /* Parameters set by insmod */ static int hp100_port[HP100_DEVICES] = { 0, [1 ... 
(HP100_DEVICES-1)] = -1 }; -module_param_array(hp100_port, int, NULL, 0); +module_param_hw_array(hp100_port, int, ioport, NULL, 0); /* List of devices */ static struct net_device *hp100_devlist[HP100_DEVICES]; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0e0fa7030565..c1af47e45d3f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1789,9 +1789,17 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, } if (err) { - if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) - mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", - vhcr->op, slave, vhcr->errno, err); + if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { + if (vhcr->op == MLX4_CMD_ALLOC_RES && + (vhcr->in_modifier & 0xff) == RES_COUNTER && + err == -EDQUOT) + mlx4_dbg(dev, + "Unable to allocate counter for slave %d (%d)\n", + slave, err); + else + mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", + vhcr->op, slave, vhcr->errno, err); + } vhcr_cmd->status = mlx4_errno_to_status(err); goto out_status; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ffbcb27c05e5..ae5fdc2df654 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1562,6 +1562,11 @@ static int mlx4_en_flow_replace(struct net_device *dev, qpn = priv->drop_qp.qpn; else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); + if (qpn < priv->rss_map.base_qpn || + qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { + en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); + return -EINVAL; + } } else { if (cmd->fs.ring_cookie >= priv->rx_ring_num) { en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index aa074e57ce06..77abd1813047 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -997,7 +997,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n", eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { - en_err(priv, + en_dbg(DRV, + priv, " frag:%d - size:%d stride:%d\n", i, priv->frag_info[i].frag_size, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 4aa29ee93013..07516545474f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -311,7 +311,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, struct mlx4_priv *priv = mlx4_priv(dev); struct resource_allocator *res_alloc = &priv->mfunc.master.res_tracker.res_alloc[res_type]; - int err = -EINVAL; + int err = -EDQUOT; int allocated, free, reserved, guaranteed, from_free; int from_rsvd; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index b3aaa985956e..694845793af2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1460,6 +1460,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) params.is_first_pf = p_hwfn->first_on_engine; params.num_pf_cids = iids.cids; params.num_vf_cids = iids.vf_cids; + 
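The mlx4_en_flow_replace() hunk above adds a bounds check on the user-chosen QP number before attaching a flow to it. The same pattern in isolation, with base/count standing in for priv->rss_map.base_qpn and priv->rx_ring_num:

#include <assert.h>
#include <stdbool.h>

/* Accept a user-supplied queue number only if it lies inside the range
 * this interface actually owns; anything else is rejected (-EINVAL in
 * the driver) instead of being programmed into the steering rule. */
static bool qpn_in_range(unsigned int qpn, unsigned int base,
			 unsigned int count)
{
	return qpn >= base && qpn < base + count;
}

int main(void)
{
	assert(qpn_in_range(0x204, 0x200, 8));
	assert(!qpn_in_range(0x300, 0x200, 8));
	return 0;
}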
params.num_tids = iids.tids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs; params.num_vf_pqs = qm_info->num_vf_pqs; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index bb70522ad362..463927f17032 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1370,7 +1370,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) NULL) + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, NULL); - norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096); + norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); min_addr_reg1 = norm_regsize / 4096; pwm_regsize = db_bar_size - norm_regsize; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c67ff1411799..537d1236a4fe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1093,10 +1093,12 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); + } + + qed_nic_stop(cdev); - qed_nic_stop(cdev); + if (IS_PF(cdev)) qed_slowpath_irq_free(cdev); - } qed_disable_msix(cdev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index eb5652073ca8..333876c19d7d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1028,11 +1028,6 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) { struct qede_dev *edev = netdev_priv(dev); - if (IS_VF(edev)) { - DP_NOTICE(edev, "VFs don't support XDP\n"); - return -EOPNOTSUPP; - } - switch (xdp->command) { case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b9ba23d71c61..38b77bbfe4ee 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -563,6 +563,23 @@ static const struct net_device_ops qede_netdev_ops = { #endif }; +static const struct net_device_ops qede_netdev_vf_ops = { + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, + .ndo_udp_tunnel_add = qede_udp_tunnel_add, + .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_features_check = qede_features_check, +}; + /* ------------------------------------------------------------------------- * START OF PROBE / REMOVE * ------------------------------------------------------------------------- @@ -622,7 +639,10 @@ static void qede_init_ndev(struct qede_dev *edev) ndev->watchdog_timeo = TX_TIMEOUT; - ndev->netdev_ops = &qede_netdev_ops; + if (IS_VF(edev)) + ndev->netdev_ops = &qede_netdev_vf_ops; + else + ndev->netdev_ops = &qede_netdev_ops; qede_set_ethtool_ops(ndev); @@ -1313,6 +1333,9 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) if (fp->type & QEDE_FASTPATH_RX) qede_free_mem_rxq(edev, fp->rxq); + if (fp->type & QEDE_FASTPATH_XDP) + qede_free_mem_txq(edev, fp->xdp_tx); + if (fp->type & 
QEDE_FASTPATH_TX) qede_free_mem_txq(edev, fp->txq); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 829be21f97b2..28ea0af89aef 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_mpi_coredump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* Get generic NIC reg dump */ @@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_reg_dump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 9bcd4aefc9c5..bed34684994f 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -151,8 +151,8 @@ MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(xcvr, int, NULL, 0); MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "ATP debug level (0-7)"); diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index c8d84679ede7..d3bb2ba51f40 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1501,8 +1501,8 @@ static void smc_set_multicast_list(struct net_device *dev) static struct net_device *devSMC9194; MODULE_LICENSE("GPL"); -module_param(io, int, 0); -module_param(irq, int, 0); +module_param_hw(io, int, ioport, 0); +module_param_hw(irq, int, irq, 0); module_param(ifport, int, 0); MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 39be96779145..22f910795be4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -70,11 +70,8 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) return -ENODEV; } -static void stmmac_default_data(struct plat_stmmacenet_data *plat) +static void common_default_data(struct plat_stmmacenet_data *plat) { - plat->bus_id = 1; - plat->phy_addr = 0; - plat->interface = PHY_INTERFACE_MODE_GMII; plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->has_gmac = 1; plat->force_sf_dma_mode = 1; @@ -82,10 +79,6 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat) plat->mdio_bus_data->phy_reset = NULL; plat->mdio_bus_data->phy_mask = 0; - plat->dma_cfg->pbl = 32; - plat->dma_cfg->pblx8 = true; - /* TODO: AXI */ - /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -107,12 +100,29 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat) plat->rx_queues_cfg[0].pkt_route = 
0x0; } +static void stmmac_default_data(struct plat_stmmacenet_data *plat) +{ + /* Set common default data first */ + common_default_data(plat); + + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + /* TODO: AXI */ +} + static int quark_default_data(struct plat_stmmacenet_data *plat, struct stmmac_pci_info *info) { struct pci_dev *pdev = info->pdev; int ret; + /* Set common default data first */ + common_default_data(plat); + /* * Refuse to load the driver and register net device if MAC controller * does not connect to any PHY interface. @@ -124,27 +134,12 @@ static int quark_default_data(struct plat_stmmacenet_data *plat, plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); plat->phy_addr = ret; plat->interface = PHY_INTERFACE_MODE_RMII; - plat->clk_csr = 2; - plat->has_gmac = 1; - plat->force_sf_dma_mode = 1; - - plat->mdio_bus_data->phy_reset = NULL; - plat->mdio_bus_data->phy_mask = 0; plat->dma_cfg->pbl = 16; plat->dma_cfg->pblx8 = true; plat->dma_cfg->fixed_burst = 1; /* AXI (TODO) */ - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - - /* Set the maxmtu to a default of JUMBO_LEN */ - plat->maxmtu = JUMBO_LEN; - return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fa674a8bda0c..f4d7aec50479 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -287,6 +287,10 @@ struct cpsw_ss_regs { /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ #define CPSW_V1_SEQ_ID_OFS_SHIFT 16 +#define CPSW_MAX_BLKS_TX 15 +#define CPSW_MAX_BLKS_TX_SHIFT 4 +#define CPSW_MAX_BLKS_RX 5 + struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; @@ -1278,11 +1282,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) switch (cpsw->version) { case CPSW_VERSION_1: slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); break; case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); break; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 594fa1407e29..1503f10122f7 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -1176,7 +1176,7 @@ static int iobase[NR_PORTS] = { 0x378, }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode"); -module_param_array(iobase, int, NULL, 0); +module_param_hw_array(iobase, int, ioport, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu"); diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 809dc25909d1..92b13b39f426 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -481,7 +481,7 @@ static int iobase[NR_PORTS] = { 0x378, }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode; eg. 
par96 or picpar"); -module_param_array(iobase, int, NULL, 0); +module_param_hw_array(iobase, int, ioport, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu"); diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index ebc06822fd4d..d9a646acca20 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -614,9 +614,9 @@ static int baud[NR_PORTS] = { [0 ... NR_PORTS-1] = 1200 }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD"); -module_param_array(iobase, int, NULL, 0); +module_param_hw_array(iobase, int, ioport, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "baycom irq number"); module_param_array(baud, int, NULL, 0); MODULE_PARM_DESC(baud, "baycom baud rate (300 to 4800)"); diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index 60fcf512c208..f1c8a9ff3891 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -642,9 +642,9 @@ static int irq[NR_PORTS] = { 4, }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD"); -module_param_array(iobase, int, NULL, 0); +module_param_hw_array(iobase, int, ioport, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "baycom irq number"); MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu"); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 2479072981a1..dec6b76bc0fb 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -274,7 +274,7 @@ static unsigned long rand; MODULE_AUTHOR("Klaus Kudielka"); MODULE_DESCRIPTION("Driver for high-speed SCC boards"); -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_LICENSE("GPL"); static void __exit dmascc_exit(void) diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index b6891ada1d7b..7a7c5224a336 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -976,12 +976,10 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCYAMSMCS: if (netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ - if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) - return -ENOBUFS; - if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { - kfree(ym); - return -EFAULT; - } + ym = memdup_user(ifr->ifr_data, + sizeof(struct yamdrv_ioctl_mcs)); + if (IS_ERR(ym)) + return PTR_ERR(ym); if (ym->bitrate > YAM_MAXBITRATE) { kfree(ym); return -EINVAL; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 9b0d6148e994..1ce6239a4849 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1616,17 +1616,14 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EPERM; } - image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); - oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); - if (!image || !oldimage) { - error = -ENOMEM; - goto wf_out; - } + image = memdup_user(rq->ifr_data, EEPROM_BYTES); + if (IS_ERR(image)) + 
return PTR_ERR(image); - error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES); - if (error) { - error = -EFAULT; - goto wf_out; + oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL); + if (!oldimage) { + kfree(image); + return -ENOMEM; } if (rrpriv->fw_running){ diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index c285eafd3f1c..35f198d83701 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -2207,11 +2207,11 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME); -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "Base I/O addresses"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ lines"); -module_param_array(dma, int, NULL, 0); +module_param_hw_array(dma, int, dma, NULL, 0); MODULE_PARM_DESC(dma, "DMA channels"); module_init(ali_ircc_init); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index aaecc3baaf30..7beae147be11 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -2396,11 +2396,11 @@ MODULE_LICENSE("GPL"); module_param(qos_mtt_bits, int, 0); MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time"); -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "Base I/O addresses"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ lines"); -module_param_array(dma, int, NULL, 0); +module_param_hw_array(dma, int, dma, NULL, 0); MODULE_PARM_DESC(dma, "DMA channels"); module_param(dongle_id, int, 0); MODULE_PARM_DESC(dongle_id, "Type-id of used dongle"); diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index dcf92ba80872..23ed89ae5ddc 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -82,24 +82,24 @@ MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults #define DMA_INVAL 255 static int ircc_dma = DMA_INVAL; -module_param(ircc_dma, int, 0); +module_param_hw(ircc_dma, int, dma, 0); MODULE_PARM_DESC(ircc_dma, "DMA channel"); #define IRQ_INVAL 255 static int ircc_irq = IRQ_INVAL; -module_param(ircc_irq, int, 0); +module_param_hw(ircc_irq, int, irq, 0); MODULE_PARM_DESC(ircc_irq, "IRQ line"); static int ircc_fir; -module_param(ircc_fir, int, 0); +module_param_hw(ircc_fir, int, ioport, 0); MODULE_PARM_DESC(ircc_fir, "FIR Base Address"); static int ircc_sir; -module_param(ircc_sir, int, 0); +module_param_hw(ircc_sir, int, ioport, 0); MODULE_PARM_DESC(ircc_sir, "SIR Base Address"); static int ircc_cfg; -module_param(ircc_cfg, int, 0); +module_param_hw(ircc_cfg, int, ioport, 0); MODULE_PARM_DESC(ircc_cfg, "Configuration register base address"); static int ircc_transceiver; diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 8d5b903d1d9d..282b6c9ae05b 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -1263,9 +1263,9 @@ MODULE_LICENSE("GPL"); module_param(qos_mtt_bits, int, 0); MODULE_PARM_DESC(qos_mtt_bits, "Mimimum Turn Time"); -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "Base I/O addresses"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ lines"); /* diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0a0412524cec..0a5f62e0efcc 
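The yam and rrunner ioctl paths above both collapse a kmalloc()-plus-copy_from_user() sequence into memdup_user(), which returns the freshly allocated copy or an ERR_PTR on failure. A sketch of the resulting caller shape (demo_copy_cfg() and its arguments are hypothetical):

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Copy a fixed-size blob from user space in one step.  On failure
 * memdup_user() returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT), so the
 * caller no longer needs a separate allocate/copy/free-on-error dance. */
static int demo_copy_cfg(const void __user *uarg, size_t len, void **out)
{
	void *cfg = memdup_user(uarg, len);

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	*out = cfg;		/* caller kfree()s when done */
	return 0;
}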
100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -203,11 +203,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) &md->mux_handle, md, md->mii_bus); if (rc) { dev_info(md->dev, "mdiomux initialization failed\n"); - goto out; + goto out_register; } dev_info(md->dev, "iProc mdiomux registered\n"); return 0; + +out_register: + mdiobus_unregister(bus); out: mdiobus_free(bus); return rc; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index bb3f71f9fbde..b5cec1824a78 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1088,6 +1088,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) u16 n = 0, index, ndplen; u8 ready2send = 0; u32 delayed_ndp_size; + size_t padding_count; /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated * accordingly. Otherwise, we should check here. @@ -1244,11 +1245,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * a ZLP after full sized NTBs. */ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && - skb_out->len > ctx->min_tx_pkt) - memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, - ctx->tx_max - skb_out->len); - else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_max - skb_out->len; + memset(skb_put(skb_out, padding_count), 0, padding_count); + } else if (skb_out->len < ctx->tx_max && + (skb_out->len % dev->maxpacket) == 0) { *skb_put(skb_out, 1) = 0; /* force short packet */ + } /* set final frame length */ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1c6d3923c224..9320d96a1632 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/cpu.h> #include <linux/average.h> +#include <net/route.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -54,17 +55,6 @@ module_param(napi_tx, bool, 0644); */ DECLARE_EWMA(pkt_len, 0, 64) -/* With mergeable buffers we align buffer address and use the low bits to - * encode its true size. Buffer size is up to 1 page so we need to align to - * square root of page size to ensure we reserve enough bits to encode the true - * size. - */ -#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) - -/* Minimum alignment for mergeable packet buffers. */ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ - 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) - #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { @@ -112,6 +102,9 @@ struct receive_queue { /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; + /* Min single buffer size for mergeable buffers case. 
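The mdio-mux probe fix above is the classic unwind rule: whatever succeeded before the failure point must be undone in reverse order, so a bus that was registered gets unregistered before it is freed. A generic standalone sketch (the step_*() helpers are stand-ins):

#include <stdio.h>

static int step_alloc(void)       { return 0; }		/* e.g. mdiobus_alloc()    */
static int step_register(void)    { return 0; }		/* e.g. mdiobus_register() */
static int step_attach_mux(void)  { return -1; }	/* e.g. mdio_mux_init()    */
static void step_unregister(void) { puts("unregister"); }
static void step_free(void)       { puts("free"); }

static int demo_probe(void)
{
	int rc;

	rc = step_alloc();
	if (rc)
		return rc;

	rc = step_register();
	if (rc)
		goto out_free;

	rc = step_attach_mux();
	if (rc)
		goto out_unregister;	/* the label the fix adds */

	return 0;

out_unregister:
	step_unregister();		/* undo step_register() */
out_free:
	step_free();			/* undo step_alloc() */
	return rc;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}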
*/ + unsigned int min_buf_len; + /* Name of this receive queue: input.$index */ char name[40]; }; @@ -277,24 +270,6 @@ static void skb_xmit_done(struct virtqueue *vq) netif_wake_subqueue(vi->dev, vq2txq(vq)); } -static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) -{ - unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1); - return (truesize + 1) * MERGEABLE_BUFFER_ALIGN; -} - -static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx) -{ - return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN); - -} - -static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize) -{ - unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN; - return (unsigned long)buf | (size - 1); -} - /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, @@ -538,15 +513,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, while (--*num_buf) { unsigned int buflen; - unsigned long ctx; void *buf; int off; - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen); - if (unlikely(!ctx)) + buf = virtqueue_get_buf(rq->vq, &buflen); + if (unlikely(!buf)) goto err_buf; - buf = mergeable_ctx_to_buf_address(ctx); p = virt_to_head_page(buf); off = buf - page_address(p); @@ -575,10 +548,10 @@ err_buf: static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, - unsigned long ctx, + void *buf, + void *ctx, unsigned int len) { - void *buf = mergeable_ctx_to_buf_address(ctx); struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); struct page *page = virt_to_head_page(buf); @@ -666,7 +639,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); - truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); + if (unlikely(len > (unsigned long)ctx)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)ctx); + dev->stats.rx_length_errors++; + goto err_skb; + } + truesize = (unsigned long)ctx; head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; @@ -675,7 +654,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, while (--num_buf) { int num_skb_frags; - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); + buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); if (unlikely(!ctx)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, @@ -685,8 +664,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - buf = mergeable_ctx_to_buf_address(ctx); page = virt_to_head_page(buf); + if (unlikely(len > (unsigned long)ctx)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)ctx); + dev->stats.rx_length_errors++; + goto err_skb; + } + truesize = (unsigned long)ctx; num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { @@ -702,7 +687,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, head_skb->truesize += nskb->truesize; num_skb_frags = 0; } - truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); if (curr_skb != head_skb) { head_skb->data_len += len; head_skb->len += len; @@ -727,14 +711,14 @@ err_xdp: err_skb: put_page(page); while (--num_buf) { - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); - if (unlikely(!ctx)) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, 
num_buf); dev->stats.rx_length_errors++; break; } - page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx)); + page = virt_to_head_page(buf); put_page(page); } err_buf: @@ -745,7 +729,7 @@ xdp_xmit: } static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) + void *buf, unsigned int len, void **ctx) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -756,9 +740,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs) { - unsigned long ctx = (unsigned long)buf; - void *base = mergeable_ctx_to_buf_address(ctx); - put_page(virt_to_head_page(base)); + put_page(virt_to_head_page(buf)); } else if (vi->big_packets) { give_pages(rq, buf); } else { @@ -768,7 +750,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } if (vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len); else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else @@ -880,14 +862,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, return err; } -static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) +static unsigned int get_mergeable_buf_len(struct receive_queue *rq, + struct ewma_pkt_len *avg_pkt_len) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), - GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); - return ALIGN(len, MERGEABLE_BUFFER_ALIGN); + rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); + return ALIGN(len, L1_CACHE_BYTES); } static int add_recvbuf_mergeable(struct virtnet_info *vi, @@ -896,17 +879,17 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, struct page_frag *alloc_frag = &rq->alloc_frag; unsigned int headroom = virtnet_get_headroom(vi); char *buf; - unsigned long ctx; + void *ctx; int err; unsigned int len, hole; - len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) return -ENOMEM; buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ - ctx = mergeable_buf_to_ctx(buf, len); + ctx = (void *)(unsigned long)len; get_page(alloc_frag->page); alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; @@ -921,7 +904,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, } sg_init_one(rq->sg, buf, len); - err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1032,10 +1015,20 @@ static int virtnet_receive(struct receive_queue *rq, int budget) void *buf; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - while (received < budget && - (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - bytes += receive_buf(vi, rq, buf, len); - received++; + if (vi->mergeable_rx_bufs) { + void *ctx; + + while (received < budget && + (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { + bytes += receive_buf(vi, rq, buf, len, ctx); + received++; + } + } else { + while (received < budget && + (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { + bytes += receive_buf(vi, rq, buf, len, NULL); + received++; + 
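In the virtio-net rework above, the buffer's true size is no longer packed into the low bits of its address; it is handed to the virtqueue as an opaque per-buffer context via virtqueue_add_inbuf_ctx() and read back with virtqueue_get_buf_ctx(). The encode/decode step is just a cast, sketched here together with the length sanity check:

#include <assert.h>

static void *len_to_ctx(unsigned long truesize)
{
	return (void *)truesize;	/* opaque token stored with the buffer */
}

static unsigned long ctx_to_len(const void *ctx)
{
	return (unsigned long)ctx;
}

int main(void)
{
	void *ctx = len_to_ctx(1536);		/* truesize posted with the buffer */
	unsigned long used_len = 1400;		/* length the device reports back */

	/* A used length above the posted truesize means the device wrote
	 * past the buffer we gave it; the driver counts rx_length_errors
	 * and drops the frame rather than trusting it. */
	assert(used_len <= ctx_to_len(ctx));
	return 0;
}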
} } if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { @@ -1854,7 +1847,6 @@ static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) virtnet_freeze_down(dev); _remove_vq_common(vi); - dev->config->reset(dev); virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); @@ -2118,9 +2110,7 @@ static void free_unused_bufs(struct virtnet_info *vi) while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs) { - unsigned long ctx = (unsigned long)buf; - void *base = mergeable_ctx_to_buf_address(ctx); - put_page(virt_to_head_page(base)); + put_page(virt_to_head_page(buf)); } else if (vi->big_packets) { give_pages(&vi->rq[i], buf); } else { @@ -2141,6 +2131,21 @@ static void virtnet_del_vqs(struct virtnet_info *vi) virtnet_free_queues(vi); } +/* How large should a single buffer be so a queue full of these can fit at + * least one full packet? + * Logic below assumes the mergeable buffer header is used. + */ +static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) +{ + const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); + unsigned int rq_size = virtqueue_get_vring_size(vq); + unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; + unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; + unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); + + return max(min_buf_len, hdr_len); +} + static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; @@ -2148,6 +2153,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) int ret = -ENOMEM; int i, total_vqs; const char **names; + bool *ctx; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by @@ -2166,6 +2172,13 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; + if (vi->mergeable_rx_bufs) { + ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); + if (!ctx) + goto err_ctx; + } else { + ctx = NULL; + } /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { @@ -2181,10 +2194,12 @@ static int virtnet_find_vqs(struct virtnet_info *vi) sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; + if (ctx) + ctx[rxq2vq(i)] = true; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names, NULL); + names, ctx, NULL); if (ret) goto err_find; @@ -2196,6 +2211,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; + vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); vi->sq[i].vq = vqs[txq2vq(i)]; } @@ -2206,6 +2222,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi) return 0; err_find: + kfree(ctx); +err_ctx: kfree(names); err_names: kfree(callbacks); @@ -2282,7 +2300,8 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; - return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); + return sprintf(buf, "%u\n", + get_mergeable_buf_len(&vi->rq[queue_index], avg)); } static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 4ca71bca39ac..6ea16260ec76 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -232,11 
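The new mergeable_min_buf_len() above picks the smallest per-buffer size such that a whole ring of buffers can still carry one maximum-sized packet. A standalone restatement with illustrative sizes (12-byte mergeable header, 14-byte Ethernet header, 4-byte VLAN tag):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int min_buf_len(unsigned int hdr_len, unsigned int max_packet,
				unsigned int ring_size)
{
	unsigned int buf_len = hdr_len + 14 + 4 + max_packet;
	unsigned int min_len = DIV_ROUND_UP(buf_len, ring_size);

	/* Never go below the header size itself. */
	return min_len > hdr_len ? min_len : hdr_len;
}

int main(void)
{
	/* 1500-byte packets on a 256-entry ring: the quotient is tiny, so
	 * the 12-byte header floor wins. */
	printf("%u\n", min_buf_len(12, 1500, 256));

	/* 65535-byte (big_packets) limit on a 256-entry ring: 257 bytes. */
	printf("%u\n", min_buf_len(12, 65535, 256));
	return 0;
}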
+232,11 @@ static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, }; static struct class *cosa_class; #ifdef MODULE -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards"); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards"); -module_param_array(dma, int, NULL, 0); +module_param_hw_array(dma, int, dma, NULL, 0); MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards"); MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>"); diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index dd6bb3364ad2..4de0737fbf8a 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -324,11 +324,11 @@ static void sv11_shutdown(struct z8530_dev *dev) static int io = 0x200; static int irq = 9; -module_param(io, int, 0); +module_param_hw(io, int, ioport, 0); MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); -module_param(dma, int, 0); +module_param_hw(dma, int, dma, 0); MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX"); -module_param(irq, int, 0); +module_param_hw(irq, int, irq, 0); MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card"); MODULE_AUTHOR("Alan Cox"); diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 3ca3419c54a0..bde8c0339831 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -1463,8 +1463,8 @@ set_multicast_list( struct net_device *dev ) #ifdef MODULE -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(baud, int, NULL, 0); module_param_array(rxl, int, NULL, 0); module_param_array(mac, int, NULL, 0); diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index fbb5aa2c4d8f..c56f2c252113 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -363,13 +363,13 @@ static int rxdma=3; static int irq=5; static bool slow=false; -module_param(io, int, 0); +module_param_hw(io, int, ioport, 0); MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); -module_param(txdma, int, 0); +module_param_hw(txdma, int, dma, 0); MODULE_PARM_DESC(txdma, "Transmit DMA channel"); -module_param(rxdma, int, 0); +module_param_hw(rxdma, int, dma, 0); MODULE_PARM_DESC(rxdma, "Receive DMA channel"); -module_param(irq, int, 0); +module_param_hw(irq, int, irq, 0); MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card"); module_param(slow, bool, 0); MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012"); diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 649ecad6844c..eff4f464a23e 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h @@ -131,7 +131,7 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe) unsigned long now; now = jiffies; - if (now - edc->timestart > timeframe) { + if (time_after(now, edc->timestart + timeframe)) { edc->errorcount = 1; edc->timestart = now; } else if (++edc->errorcount > max_err) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 68fcbe03bce2..b3f20b3c0210 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ 
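The i2400m change above swaps an open-coded "now - start > timeframe" for time_after(), which stays correct when jiffies wraps around. A userspace restatement of the idea (the kernel macro compares the signed difference):

#include <assert.h>

typedef unsigned long jiffies_t;

/* time_after(a, b): true if a is later than b, even across a counter
 * wrap, because the comparison is done on the signed difference. */
static int demo_time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t start = (jiffies_t)-10;	/* just before the wrap */
	jiffies_t now = 5;			/* just after the wrap */

	/* A plain "now > start + timeframe" style test misfires here;
	 * the signed-difference form still sees "now" as later. */
	assert(demo_time_after(now, start));
	return 0;
}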
b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -522,7 +522,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; - rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; + rxs->bw = (rxsp->status4 & AR_2040) ? RATE_INFO_BW_40 : RATE_INFO_BW_20; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 6128c2bb23d8..77c94f9e7b61 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -580,8 +580,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, /* directly mapped flags for ieee80211_rx_status */ rs->enc_flags |= (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; - rs->enc_flags |= - (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; + rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 : + RATE_INFO_BW_20; if (AR_SREV_9280_20_OR_LATER(ah)) rs->enc_flags |= (ads.ds_rxstatus3 & AR_STBC) ? diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 4b040451a9b8..1b7e125a28e2 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -246,8 +246,8 @@ MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(rates, int, NULL, 0); module_param_array(ssids, charp, NULL, 0); module_param(auto_wep, int, 0); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 5d5faa3cad24..49a2ff15ddae 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -734,7 +734,9 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (rate_n_flags & RATE_MCS_HT_MSK) rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else + rx_status.bw = RATE_INFO_BW_20; if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 1ee1ba9931a7..adfd6307edca 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -889,7 +889,9 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, if (rate_n_flags & RATE_MCS_HT_MSK) rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else + rx_status.bw = RATE_INFO_BW_20; if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_GF_MSK) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 87444af20fc5..002b25cff5b6 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1201,7 +1201,13 @@ static bool 
mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_80; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_160; + else + rx_status.bw = RATE_INFO_BW_20; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate real signal strength (and optional packet loss) */ diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 8c4adac6fafc..f5df78ed1e10 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -367,7 +367,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb, if (unlikely(elba > nvmdev->total_secs)) { pr_err("nvm: L2P data from device is out of bounds!\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } /* Transform physical address to target address space */ @@ -464,8 +465,8 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas, return ret; } -static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, - struct nvme_ns *ns, struct nvme_nvm_command *c) +static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns, + struct nvme_nvm_command *c) { c->ph_rw.opcode = rqd->opcode; c->ph_rw.nsid = cpu_to_le32(ns->ns_id); @@ -503,7 +504,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) if (!cmd) return -ENOMEM; - nvme_nvm_rqtocmd(rq, rqd, ns, cmd); + nvme_nvm_rqtocmd(rqd, ns, cmd); rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY); if (IS_ERR(rq)) { diff --git a/drivers/of/device.c b/drivers/of/device.c index 6e2f9113b1b7..9416d052cb89 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -82,7 +82,7 @@ int of_device_add(struct platform_device *ofdev) * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. */ -void of_dma_configure(struct device *dev, struct device_node *np) +int of_dma_configure(struct device *dev, struct device_node *np) { u64 dma_addr, paddr, size; int ret; @@ -107,7 +107,7 @@ void of_dma_configure(struct device *dev, struct device_node *np) ret = of_dma_get_range(np, &dma_addr, &paddr, &size); if (ret < 0) { dma_addr = offset = 0; - size = dev->coherent_dma_mask + 1; + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); } else { offset = PFN_DOWN(paddr - dma_addr); @@ -123,7 +123,7 @@ void of_dma_configure(struct device *dev, struct device_node *np) if (!size) { dev_err(dev, "Adjusted size 0x%llx invalid\n", size); - return; + return -EINVAL; } dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } @@ -144,13 +144,30 @@ void of_dma_configure(struct device *dev, struct device_node *np) coherent ? " " : " not "); iommu = of_iommu_configure(dev, np); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + dev_dbg(dev, "device is%sbehind an iommu\n", iommu ? " " : " not "); arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); + + return 0; } EXPORT_SYMBOL_GPL(of_dma_configure); +/** + * of_dma_deconfigure - Clean up DMA configuration + * @dev: Device for which to clean up DMA configuration + * + * Clean up all configuration performed by of_dma_configure_ops() and free all + * resources that have been allocated. 
+ */ +void of_dma_deconfigure(struct device *dev) +{ + arch_teardown_dma_ops(dev); +} + int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 45b413e5a444..71fecc2debfc 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_iommu.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -158,11 +159,6 @@ struct platform_device *of_device_alloc(struct device_node *np, } EXPORT_SYMBOL(of_device_alloc); -static void of_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} - /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for @@ -191,11 +187,9 @@ static struct platform_device *of_platform_device_create_pdata( dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; - of_dma_configure(&dev->dev, dev->dev.of_node); of_msi_configure(&dev->dev, dev->dev.of_node); if (of_device_add(dev) != 0) { - of_dma_deconfigure(&dev->dev); platform_device_put(dev); goto err_clear_flag; } @@ -253,7 +247,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node, dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); - of_dma_configure(&dev->dev, dev->dev.of_node); /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); @@ -547,7 +540,6 @@ static int of_platform_device_destroy(struct device *dev, void *data) amba_device_unregister(to_amba_device(dev)); #endif - of_dma_deconfigure(dev); of_node_clear_flag(dev->of_node, OF_POPULATED); of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 9d42dfe65d44..5548193a28a6 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -3150,13 +3150,13 @@ static char *irq[PARPORT_PC_MAX_PORTS]; static char *dma[PARPORT_PC_MAX_PORTS]; MODULE_PARM_DESC(io, "Base I/O address (SPP regs)"); -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io_hi, "Base I/O address (ECR)"); -module_param_array(io_hi, int, NULL, 0); +module_param_hw_array(io_hi, int, ioport, NULL, 0); MODULE_PARM_DESC(irq, "IRQ line"); -module_param_array(irq, charp, NULL, 0); +module_param_hw_array(irq, charp, irq, NULL, 0); MODULE_PARM_DESC(dma, "DMA channel"); -module_param_array(dma, charp, NULL, 0); +module_param_hw_array(dma, charp, dma, NULL, 0); #if defined(CONFIG_PARPORT_PC_SUPERIO) || \ (defined(CONFIG_PARPORT_1284) && defined(CONFIG_PARPORT_PC_FIFO)) MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialisation"); diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index 88a44a707b96..bbf9cf8aeaad 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c @@ -220,7 +220,7 @@ module_param(first_slot, byte, 0); MODULE_PARM_DESC(first_slot, "Hotswap bus first slot number"); module_param(last_slot, byte, 0); MODULE_PARM_DESC(last_slot, "Hotswap bus last slot number"); -module_param(port, ushort, 0); +module_param_hw(port, ushort, ioport, 0); MODULE_PARM_DESC(port, "#ENUM signal I/O port"); module_param(enum_bit, uint, 0); MODULE_PARM_DESC(enum_bit, "#ENUM signal bit (0-7)"); diff --git 
a/drivers/pci/probe.c b/drivers/pci/probe.c index 01eb8038fceb..19c8950c6c38 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1914,33 +1914,6 @@ static void pci_set_msi_domain(struct pci_dev *dev) dev_set_msi_domain(&dev->dev, d); } -/** - * pci_dma_configure - Setup DMA configuration - * @dev: ptr to pci_dev struct of the PCI device - * - * Function to update PCI devices's DMA configuration using the same - * info from the OF node or ACPI node of host bridge's parent (if any). - */ -static void pci_dma_configure(struct pci_dev *dev) -{ - struct device *bridge = pci_get_host_bridge_device(dev); - - if (IS_ENABLED(CONFIG_OF) && - bridge->parent && bridge->parent->of_node) { - of_dma_configure(&dev->dev, bridge->parent->of_node); - } else if (has_acpi_companion(bridge)) { - struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); - enum dev_dma_attr attr = acpi_get_dma_attr(adev); - - if (attr == DEV_DMA_NOT_SUPPORTED) - dev_warn(&dev->dev, "DMA not supported.\n"); - else - acpi_dma_configure(&dev->dev, attr); - } - - pci_put_host_bridge_device(bridge); -} - void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { int ret; @@ -1954,7 +1927,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; - pci_dma_configure(dev); pci_set_dma_max_seg_size(dev, 65536); pci_set_dma_seg_boundary(dev, 0xffffffff); diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index eb0d80a429e4..fb38cc01859f 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c @@ -108,12 +108,12 @@ static int async_clock = -1; static int cable_mode = -1; static int wakeup = 0; -module_param(i365_base, ulong, 0444); +module_param_hw(i365_base, ulong, ioport, 0444); module_param(ignore, int, 0444); module_param(extra_sockets, int, 0444); -module_param(irq_mask, int, 0444); -module_param_array(irq_list, int, &irq_list_count, 0444); -module_param(cs_irq, int, 0444); +module_param_hw(irq_mask, int, other, 0444); +module_param_hw_array(irq_list, int, irq, &irq_list_count, 0444); +module_param_hw(cs_irq, int, irq, 0444); module_param(async_clock, int, 0444); module_param(cable_mode, int, 0444); module_param(wakeup, int, 0444); diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 1ee63e5f0550..a1ac72d51d70 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c @@ -85,12 +85,12 @@ static int poll_quick = HZ/20; /* CCLK external clock time, in nanoseconds. 
70 ns = 14.31818 MHz */ static int cycle_time = 70; -module_param(tcic_base, ulong, 0444); +module_param_hw(tcic_base, ulong, ioport, 0444); module_param(ignore, int, 0444); module_param(do_scan, int, 0444); -module_param(irq_mask, int, 0444); -module_param_array(irq_list, int, &irq_list_count, 0444); -module_param(cs_irq, int, 0444); +module_param_hw(irq_mask, int, other, 0444); +module_param_hw_array(irq_list, int, irq, &irq_list_count, 0444); +module_param_hw(cs_irq, int, irq, 0444); module_param(poll_interval, int, 0444); module_param(poll_quick, int, 0444); module_param(cycle_time, int, 0444); diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 9a25110c4a46..9ddad0815ba9 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -1164,6 +1164,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng), RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann), RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core), + RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core), RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core), RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server), diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 42e37c20b361..313c10789ca2 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -293,6 +293,15 @@ config PWM_MTK_DISP To compile this driver as a module, choose M here: the module will be called pwm-mtk-disp. +config PWM_MEDIATEK + tristate "MediaTek PWM support" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + Generic PWM framework driver for Mediatek ARM SoC. + + To compile this driver as a module, choose M here: the module + will be called pwm-mxs. + config PWM_MXS tristate "Freescale MXS PWM support" depends on ARCH_MXS && OF diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 346a83b00f28..93da1f79a3b8 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o obj-$(CONFIG_PWM_MESON) += pwm-meson.o +obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o obj-$(CONFIG_PWM_MXS) += pwm-mxs.o obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index 999187277ea5..54c6633d9b5d 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -49,172 +49,181 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip) return container_of(chip, struct atmel_hlcdc_pwm, chip); } -static int atmel_hlcdc_pwm_config(struct pwm_chip *c, - struct pwm_device *pwm, - int duty_ns, int period_ns) +static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, + struct pwm_state *state) { struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); struct atmel_hlcdc *hlcdc = chip->hlcdc; - struct clk *new_clk = hlcdc->slow_clk; - u64 pwmcval = duty_ns * 256; - unsigned long clk_freq; - u64 clk_period_ns; - u32 pwmcfg; - int pres; - - if (!chip->errata || !chip->errata->slow_clk_erratum) { - clk_freq = clk_get_rate(new_clk); - if (!clk_freq) - return -EINVAL; + unsigned int status; + int ret; - clk_period_ns = (u64)NSEC_PER_SEC * 256; - do_div(clk_period_ns, clk_freq); - } + if (state->enabled) { + struct clk *new_clk = hlcdc->slow_clk; + u64 pwmcval = state->duty_cycle * 256; + unsigned long clk_freq; + u64 
clk_period_ns; + u32 pwmcfg; + int pres; + + if (!chip->errata || !chip->errata->slow_clk_erratum) { + clk_freq = clk_get_rate(new_clk); + if (!clk_freq) + return -EINVAL; + + clk_period_ns = (u64)NSEC_PER_SEC * 256; + do_div(clk_period_ns, clk_freq); + } + + /* Errata: cannot use slow clk on some IP revisions */ + if ((chip->errata && chip->errata->slow_clk_erratum) || + clk_period_ns > state->period) { + new_clk = hlcdc->sys_clk; + clk_freq = clk_get_rate(new_clk); + if (!clk_freq) + return -EINVAL; + + clk_period_ns = (u64)NSEC_PER_SEC * 256; + do_div(clk_period_ns, clk_freq); + } + + for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) { + /* Errata: cannot divide by 1 on some IP revisions */ + if (!pres && chip->errata && + chip->errata->div1_clk_erratum) + continue; + + if ((clk_period_ns << pres) >= state->period) + break; + } - /* Errata: cannot use slow clk on some IP revisions */ - if ((chip->errata && chip->errata->slow_clk_erratum) || - clk_period_ns > period_ns) { - new_clk = hlcdc->sys_clk; - clk_freq = clk_get_rate(new_clk); - if (!clk_freq) + if (pres > ATMEL_HLCDC_PWMPS_MAX) return -EINVAL; - clk_period_ns = (u64)NSEC_PER_SEC * 256; - do_div(clk_period_ns, clk_freq); - } + pwmcfg = ATMEL_HLCDC_PWMPS(pres); - for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) { - /* Errata: cannot divide by 1 on some IP revisions */ - if (!pres && chip->errata && chip->errata->div1_clk_erratum) - continue; + if (new_clk != chip->cur_clk) { + u32 gencfg = 0; + int ret; - if ((clk_period_ns << pres) >= period_ns) - break; - } + ret = clk_prepare_enable(new_clk); + if (ret) + return ret; - if (pres > ATMEL_HLCDC_PWMPS_MAX) - return -EINVAL; + clk_disable_unprepare(chip->cur_clk); + chip->cur_clk = new_clk; - pwmcfg = ATMEL_HLCDC_PWMPS(pres); + if (new_clk == hlcdc->sys_clk) + gencfg = ATMEL_HLCDC_CLKPWMSEL; - if (new_clk != chip->cur_clk) { - u32 gencfg = 0; - int ret; + ret = regmap_update_bits(hlcdc->regmap, + ATMEL_HLCDC_CFG(0), + ATMEL_HLCDC_CLKPWMSEL, + gencfg); + if (ret) + return ret; + } - ret = clk_prepare_enable(new_clk); - if (ret) - return ret; + do_div(pwmcval, state->period); - clk_disable_unprepare(chip->cur_clk); - chip->cur_clk = new_clk; + /* + * The PWM duty cycle is configurable from 0/256 to 255/256 of + * the period cycle. Hence we can't set a duty cycle occupying + * the whole period cycle if we're asked to. + * Set it to 255 if pwmcval is greater than 256. + */ + if (pwmcval > 255) + pwmcval = 255; - if (new_clk == hlcdc->sys_clk) - gencfg = ATMEL_HLCDC_CLKPWMSEL; + pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval); - ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(0), - ATMEL_HLCDC_CLKPWMSEL, gencfg); + if (state->polarity == PWM_POLARITY_NORMAL) + pwmcfg |= ATMEL_HLCDC_PWMPOL; + + ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6), + ATMEL_HLCDC_PWMCVAL_MASK | + ATMEL_HLCDC_PWMPS_MASK | + ATMEL_HLCDC_PWMPOL, + pwmcfg); if (ret) return ret; - } - do_div(pwmcval, period_ns); - - /* - * The PWM duty cycle is configurable from 0/256 to 255/256 of the - * period cycle. Hence we can't set a duty cycle occupying the - * whole period cycle if we're asked to. - * Set it to 255 if pwmcval is greater than 256. 
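The duty-cycle math carried over in the hlcdc hunk above fits the requested duty into an 8-bit field expressed in 1/256ths of the period, clamping a 100% request to 255/256 as the comment explains. In isolation:

#include <assert.h>
#include <stdint.h>

/* Convert a duty cycle (both values in nanoseconds) into the 0..255
 * PWMCVAL register field; 256 steps exist but 256/256 cannot be encoded,
 * so full-scale requests clamp to 255. */
static uint32_t hlcdc_pwmcval(uint64_t duty_ns, uint64_t period_ns)
{
	uint64_t cval = (duty_ns * 256) / period_ns;

	return cval > 255 ? 255 : (uint32_t)cval;
}

int main(void)
{
	assert(hlcdc_pwmcval(500000, 1000000) == 128);	/* 50% -> 128/256 */
	assert(hlcdc_pwmcval(1000000, 1000000) == 255);	/* 100% clamps    */
	return 0;
}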
- */ - if (pwmcval > 255) - pwmcval = 255; - - pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval); + ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, + ATMEL_HLCDC_PWM); + if (ret) + return ret; - return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6), - ATMEL_HLCDC_PWMCVAL_MASK | - ATMEL_HLCDC_PWMPS_MASK, - pwmcfg); -} + ret = regmap_read_poll_timeout(hlcdc->regmap, ATMEL_HLCDC_SR, + status, + status & ATMEL_HLCDC_PWM, + 10, 0); + if (ret) + return ret; + } else { + ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS, + ATMEL_HLCDC_PWM); + if (ret) + return ret; -static int atmel_hlcdc_pwm_set_polarity(struct pwm_chip *c, - struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); - struct atmel_hlcdc *hlcdc = chip->hlcdc; - u32 cfg = 0; + ret = regmap_read_poll_timeout(hlcdc->regmap, ATMEL_HLCDC_SR, + status, + !(status & ATMEL_HLCDC_PWM), + 10, 0); + if (ret) + return ret; - if (polarity == PWM_POLARITY_NORMAL) - cfg = ATMEL_HLCDC_PWMPOL; + clk_disable_unprepare(chip->cur_clk); + chip->cur_clk = NULL; + } - return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6), - ATMEL_HLCDC_PWMPOL, cfg); + return 0; } -static int atmel_hlcdc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm) -{ - struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); - struct atmel_hlcdc *hlcdc = chip->hlcdc; - u32 status; - int ret; +static const struct pwm_ops atmel_hlcdc_pwm_ops = { + .apply = atmel_hlcdc_pwm_apply, + .owner = THIS_MODULE, +}; - ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PWM); - if (ret) - return ret; +static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = { + .slow_clk_erratum = true, +}; - while (true) { - ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status); - if (ret) - return ret; +static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = { + .div1_clk_erratum = true, +}; - if ((status & ATMEL_HLCDC_PWM) != 0) - break; +#ifdef CONFIG_PM_SLEEP +static int atmel_hlcdc_pwm_suspend(struct device *dev) +{ + struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); - usleep_range(1, 10); - } + /* Keep the periph clock enabled if the PWM is still running. */ + if (pwm_is_enabled(&chip->chip.pwms[0])) + clk_disable_unprepare(chip->hlcdc->periph_clk); return 0; } -static void atmel_hlcdc_pwm_disable(struct pwm_chip *c, - struct pwm_device *pwm) +static int atmel_hlcdc_pwm_resume(struct device *dev) { - struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); - struct atmel_hlcdc *hlcdc = chip->hlcdc; - u32 status; + struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); + struct pwm_state state; int ret; - ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PWM); - if (ret) - return; + pwm_get_state(&chip->chip.pwms[0], &state); - while (true) { - ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status); + /* Re-enable the periph clock it was stopped during suspend. 
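The prescaler search earlier in atmel_hlcdc_pwm_apply() picks the smallest power-of-two divider whose full 256-step cycle covers the requested period. A compilable sketch of that selection; PWMPS_MAX below is an assumed stand-in for ATMEL_HLCDC_PWMPS_MAX, not the real constant:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define PWMPS_MAX	6	/* illustrative stand-in for ATMEL_HLCDC_PWMPS_MAX */

/* Returns the smallest prescaler for which 256 prescaled clock ticks span
 * at least period_ns, or -1 when the period cannot be reached even with the
 * largest divider. skip_div1 models the "cannot divide by 1" erratum. */
static int hlcdc_pick_prescaler(uint64_t clk_hz, uint64_t period_ns,
				int skip_div1)
{
	uint64_t clk_period_ns = NSEC_PER_SEC * 256 / clk_hz;
	int pres;

	for (pres = 0; pres <= PWMPS_MAX; pres++) {
		if (pres == 0 && skip_div1)
			continue;
		if ((clk_period_ns << pres) >= period_ns)
			break;
	}

	return pres > PWMPS_MAX ? -1 : pres;
}

int main(void)
{
	/* 10 ms period on the 32.768 kHz slow clock needs one divide-by-2 step */
	printf("%d\n", hlcdc_pick_prescaler(32768, 10000000, 0));
	return 0;
}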
*/ + if (!state.enabled) { + ret = clk_prepare_enable(chip->hlcdc->periph_clk); if (ret) - return; - - if ((status & ATMEL_HLCDC_PWM) == 0) - break; - - usleep_range(1, 10); + return ret; } -} - -static const struct pwm_ops atmel_hlcdc_pwm_ops = { - .config = atmel_hlcdc_pwm_config, - .set_polarity = atmel_hlcdc_pwm_set_polarity, - .enable = atmel_hlcdc_pwm_enable, - .disable = atmel_hlcdc_pwm_disable, - .owner = THIS_MODULE, -}; -static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = { - .slow_clk_erratum = true, -}; + return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state); +} +#endif -static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = { - .div1_clk_erratum = true, -}; +static SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops, + atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume); static const struct of_device_id atmel_hlcdc_dt_ids[] = { { @@ -305,6 +314,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = { .driver = { .name = "atmel-hlcdc-pwm", .of_match_table = atmel_hlcdc_pwm_dt_ids, + .pm = &atmel_hlcdc_pwm_pm_ops, }, .probe = atmel_hlcdc_pwm_probe, .remove = atmel_hlcdc_pwm_remove, diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 67a7023be5c2..530d7dc5f1b5 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -58,17 +58,22 @@ #define PWM_MAX_PRD 0xFFFF #define PRD_MAX_PRES 10 +struct atmel_pwm_registers { + u8 period; + u8 period_upd; + u8 duty; + u8 duty_upd; +}; + struct atmel_pwm_chip { struct pwm_chip chip; struct clk *clk; void __iomem *base; + const struct atmel_pwm_registers *regs; unsigned int updated_pwms; /* ISR is cleared when read, ensure only one thread does that */ struct mutex isr_lock; - - void (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned long dty, unsigned long prd); }; static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip) @@ -105,153 +110,71 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip, writel_relaxed(val, chip->base + base + offset); } -static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, + const struct pwm_state *state, + unsigned long *cprd, u32 *pres) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); - unsigned long prd, dty; - unsigned long long div; - unsigned int pres = 0; - u32 val; - int ret; - - if (pwm_is_enabled(pwm) && (period_ns != pwm_get_period(pwm))) { - dev_err(chip->dev, "cannot change PWM period while enabled\n"); - return -EBUSY; - } + unsigned long long cycles = state->period; /* Calculate the period cycles and prescale value */ - div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns; - do_div(div, NSEC_PER_SEC); + cycles *= clk_get_rate(atmel_pwm->clk); + do_div(cycles, NSEC_PER_SEC); - while (div > PWM_MAX_PRD) { - div >>= 1; - pres++; - } + for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1) + (*pres)++; - if (pres > PRD_MAX_PRES) { + if (*pres > PRD_MAX_PRES) { dev_err(chip->dev, "pres exceeds the maximum value\n"); return -EINVAL; } - /* Calculate the duty cycles */ - prd = div; - div *= duty_ns; - do_div(div, period_ns); - dty = prd - div; - - ret = clk_enable(atmel_pwm->clk); - if (ret) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return ret; - } - - /* It is necessary to preserve CPOL, inside CMR */ - val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); - val = (val & ~PWM_CMR_CPRE_MSK) | (pres & 
PWM_CMR_CPRE_MSK); - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); - atmel_pwm->config(chip, pwm, dty, prd); - mutex_lock(&atmel_pwm->isr_lock); - atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR); - atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm); - mutex_unlock(&atmel_pwm->isr_lock); - - clk_disable(atmel_pwm->clk); - return ret; -} - -static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned long dty, unsigned long prd) -{ - struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); - unsigned int val; - + *cprd = cycles; - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty); - - val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); - val &= ~PWM_CMR_UPD_CDTY; - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); - - /* - * If the PWM channel is enabled, only update CDTY by using the update - * register, it needs to set bit 10 of CMR to 0 - */ - if (pwm_is_enabled(pwm)) - return; - /* - * If the PWM channel is disabled, write value to duty and period - * registers directly. - */ - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty); - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd); + return 0; } -static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned long dty, unsigned long prd) +static void atmel_pwm_calculate_cdty(const struct pwm_state *state, + unsigned long cprd, unsigned long *cdty) { - struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); + unsigned long long cycles = state->duty_cycle; - if (pwm_is_enabled(pwm)) { - /* - * If the PWM channel is enabled, using the duty update register - * to update the value. - */ - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTYUPD, dty); - } else { - /* - * If the PWM channel is disabled, write value to duty and - * period registers directly. 
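The plain Atmel PWM block follows the same idea with a 16-bit period counter: the requested period is converted to clock cycles, halved until it fits in CPRD, and the complement of the duty is programmed into CDTY. A self-contained sketch of that arithmetic, reusing the PWM_MAX_PRD and PRD_MAX_PRES values defined at the top of this driver (the function names below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define PWM_MAX_PRD	0xFFFF
#define PRD_MAX_PRES	10

/* Period in nanoseconds -> (prescaler, CPRD), mirroring
 * atmel_pwm_calculate_cprd_and_pres(); returns 0 on success. */
static int calc_cprd_pres(uint64_t clk_hz, uint64_t period_ns,
			  unsigned long *cprd, uint32_t *pres)
{
	uint64_t cycles = period_ns * clk_hz / NSEC_PER_SEC;

	for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1)
		(*pres)++;

	if (*pres > PRD_MAX_PRES)
		return -1;

	*cprd = cycles;
	return 0;
}

/* The driver stores CPRD minus the duty cycles in CDTY. */
static unsigned long calc_cdty(uint64_t duty_ns, uint64_t period_ns,
			       unsigned long cprd)
{
	return cprd - duty_ns * cprd / period_ns;
}

int main(void)
{
	unsigned long cprd, cdty;
	uint32_t pres;

	/* 133 MHz clock, 1 ms period, 25% duty */
	if (!calc_cprd_pres(133000000, 1000000, &cprd, &pres)) {
		cdty = calc_cdty(250000, 1000000, cprd);
		printf("pres=%u cprd=%lu cdty=%lu\n", pres, cprd, cdty);
	}
	return 0;
}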
- */ - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTY, dty); - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CPRD, prd); - } + cycles *= cprd; + do_div(cycles, state->period); + *cdty = cprd - cycles; } -static int atmel_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity) +static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm, + unsigned long cdty) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); u32 val; - int ret; - - val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); - if (polarity == PWM_POLARITY_NORMAL) - val &= ~PWM_CMR_CPOL; - else - val |= PWM_CMR_CPOL; - - ret = clk_enable(atmel_pwm->clk); - if (ret) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return ret; + if (atmel_pwm->regs->duty_upd == + atmel_pwm->regs->period_upd) { + val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); + val &= ~PWM_CMR_UPD_CDTY; + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); } - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); - - clk_disable(atmel_pwm->clk); - - return 0; + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, + atmel_pwm->regs->duty_upd, cdty); } -static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, + struct pwm_device *pwm, + unsigned long cprd, unsigned long cdty) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); - int ret; - - ret = clk_enable(atmel_pwm->clk); - if (ret) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return ret; - } - atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm); - - return 0; + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, + atmel_pwm->regs->duty, cdty); + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, + atmel_pwm->regs->period, cprd); } -static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, + bool disable_clk) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned long timeout = jiffies + 2 * HZ; @@ -282,37 +205,99 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) time_before(jiffies, timeout)) usleep_range(10, 100); - clk_disable(atmel_pwm->clk); + if (disable_clk) + clk_disable(atmel_pwm->clk); +} + +static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); + struct pwm_state cstate; + unsigned long cprd, cdty; + u32 pres, val; + int ret; + + pwm_get_state(pwm, &cstate); + + if (state->enabled) { + if (cstate.enabled && + cstate.polarity == state->polarity && + cstate.period == state->period) { + cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, + atmel_pwm->regs->period); + atmel_pwm_calculate_cdty(state, cprd, &cdty); + atmel_pwm_update_cdty(chip, pwm, cdty); + return 0; + } + + ret = atmel_pwm_calculate_cprd_and_pres(chip, state, &cprd, + &pres); + if (ret) { + dev_err(chip->dev, + "failed to calculate cprd and prescaler\n"); + return ret; + } + + atmel_pwm_calculate_cdty(state, cprd, &cdty); + + if (cstate.enabled) { + atmel_pwm_disable(chip, pwm, false); + } else { + ret = clk_enable(atmel_pwm->clk); + if (ret) { + dev_err(chip->dev, "failed to enable clock\n"); + return ret; + } + } + + /* It is necessary to preserve CPOL, inside CMR */ + val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); + val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK); + if (state->polarity 
== PWM_POLARITY_NORMAL) + val &= ~PWM_CMR_CPOL; + else + val |= PWM_CMR_CPOL; + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); + atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty); + mutex_lock(&atmel_pwm->isr_lock); + atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR); + atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm); + mutex_unlock(&atmel_pwm->isr_lock); + atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm); + } else if (cstate.enabled) { + atmel_pwm_disable(chip, pwm, true); + } + + return 0; } static const struct pwm_ops atmel_pwm_ops = { - .config = atmel_pwm_config, - .set_polarity = atmel_pwm_set_polarity, - .enable = atmel_pwm_enable, - .disable = atmel_pwm_disable, + .apply = atmel_pwm_apply, .owner = THIS_MODULE, }; -struct atmel_pwm_data { - void (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned long dty, unsigned long prd); +static const struct atmel_pwm_registers atmel_pwm_regs_v1 = { + .period = PWMV1_CPRD, + .period_upd = PWMV1_CUPD, + .duty = PWMV1_CDTY, + .duty_upd = PWMV1_CUPD, }; -static const struct atmel_pwm_data atmel_pwm_data_v1 = { - .config = atmel_pwm_config_v1, -}; - -static const struct atmel_pwm_data atmel_pwm_data_v2 = { - .config = atmel_pwm_config_v2, +static const struct atmel_pwm_registers atmel_pwm_regs_v2 = { + .period = PWMV2_CPRD, + .period_upd = PWMV2_CPRDUPD, + .duty = PWMV2_CDTY, + .duty_upd = PWMV2_CDTYUPD, }; static const struct platform_device_id atmel_pwm_devtypes[] = { { .name = "at91sam9rl-pwm", - .driver_data = (kernel_ulong_t)&atmel_pwm_data_v1, + .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1, }, { .name = "sama5d3-pwm", - .driver_data = (kernel_ulong_t)&atmel_pwm_data_v2, + .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2, }, { /* sentinel */ }, @@ -322,17 +307,20 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes); static const struct of_device_id atmel_pwm_dt_ids[] = { { .compatible = "atmel,at91sam9rl-pwm", - .data = &atmel_pwm_data_v1, + .data = &atmel_pwm_regs_v1, }, { .compatible = "atmel,sama5d3-pwm", - .data = &atmel_pwm_data_v2, + .data = &atmel_pwm_regs_v2, + }, { + .compatible = "atmel,sama5d2-pwm", + .data = &atmel_pwm_regs_v2, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); -static inline const struct atmel_pwm_data * +static inline const struct atmel_pwm_registers * atmel_pwm_get_driver_data(struct platform_device *pdev) { const struct platform_device_id *id; @@ -342,18 +330,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev) id = platform_get_device_id(pdev); - return (struct atmel_pwm_data *)id->driver_data; + return (struct atmel_pwm_registers *)id->driver_data; } static int atmel_pwm_probe(struct platform_device *pdev) { - const struct atmel_pwm_data *data; + const struct atmel_pwm_registers *regs; struct atmel_pwm_chip *atmel_pwm; struct resource *res; int ret; - data = atmel_pwm_get_driver_data(pdev); - if (!data) + regs = atmel_pwm_get_driver_data(pdev); + if (!regs) return -ENODEV; atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL); @@ -385,7 +373,7 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; - atmel_pwm->config = data->config; + atmel_pwm->regs = regs; atmel_pwm->updated_pwms = 0; mutex_init(&atmel_pwm->isr_lock); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c new file mode 100644 index 000000000000..5c11bc708a3c --- /dev/null +++ b/drivers/pwm/pwm-mediatek.c @@ -0,0 +1,219 @@ +/* + * Mediatek Pulse Width Modulator driver + * + * 
Copyright (C) 2015 John Crispin <blogic@openwrt.org> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/slab.h> +#include <linux/types.h> + +/* PWM registers and bits definitions */ +#define PWMCON 0x00 +#define PWMHDUR 0x04 +#define PWMLDUR 0x08 +#define PWMGDUR 0x0c +#define PWMWAVENUM 0x28 +#define PWMDWIDTH 0x2c +#define PWMTHRES 0x30 + +enum { + MTK_CLK_MAIN = 0, + MTK_CLK_TOP, + MTK_CLK_PWM1, + MTK_CLK_PWM2, + MTK_CLK_PWM3, + MTK_CLK_PWM4, + MTK_CLK_PWM5, + MTK_CLK_MAX, +}; + +static const char * const mtk_pwm_clk_name[] = { + "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5" +}; + +/** + * struct mtk_pwm_chip - struct representing PWM chip + * @chip: linux PWM chip representation + * @regs: base address of PWM chip + * @clks: list of clocks + */ +struct mtk_pwm_chip { + struct pwm_chip chip; + void __iomem *regs; + struct clk *clks[MTK_CLK_MAX]; +}; + +static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct mtk_pwm_chip, chip); +} + +static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num, + unsigned int offset) +{ + return readl(chip->regs + 0x10 + (num * 0x40) + offset); +} + +static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip, + unsigned int num, unsigned int offset, + u32 value) +{ + writel(value, chip->regs + 0x10 + (num * 0x40) + offset); +} + +static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); + struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]; + u32 resolution, clkdiv = 0; + + resolution = NSEC_PER_SEC / clk_get_rate(clk); + + while (period_ns / resolution > 8191) { + resolution *= 2; + clkdiv++; + } + + if (clkdiv > 7) + return -EINVAL; + + mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | BIT(3) | clkdiv); + mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution); + mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution); + + return 0; +} + +static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); + u32 value; + int ret; + + ret = clk_prepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); + if (ret < 0) + return ret; + + value = readl(pc->regs); + value |= BIT(pwm->hwpwm); + writel(value, pc->regs); + + return 0; +} + +static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); + u32 value; + + value = readl(pc->regs); + value &= ~BIT(pwm->hwpwm); + writel(value, pc->regs); + + clk_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); +} + +static const struct pwm_ops mtk_pwm_ops = { + .config = mtk_pwm_config, + .enable = mtk_pwm_enable, + .disable = mtk_pwm_disable, + .owner = THIS_MODULE, +}; + +static int mtk_pwm_probe(struct platform_device *pdev) +{ + struct mtk_pwm_chip *pc; + struct resource *res; + unsigned int i; + int ret; + + pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pc->regs = devm_ioremap_resource(&pdev->dev, res); + if 
(IS_ERR(pc->regs)) + return PTR_ERR(pc->regs); + + for (i = 0; i < MTK_CLK_MAX; i++) { + pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]); + if (IS_ERR(pc->clks[i])) + return PTR_ERR(pc->clks[i]); + } + + ret = clk_prepare(pc->clks[MTK_CLK_TOP]); + if (ret < 0) + return ret; + + ret = clk_prepare(pc->clks[MTK_CLK_MAIN]); + if (ret < 0) + goto disable_clk_top; + + platform_set_drvdata(pdev, pc); + + pc->chip.dev = &pdev->dev; + pc->chip.ops = &mtk_pwm_ops; + pc->chip.base = -1; + pc->chip.npwm = 5; + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + goto disable_clk_main; + } + + return 0; + +disable_clk_main: + clk_unprepare(pc->clks[MTK_CLK_MAIN]); +disable_clk_top: + clk_unprepare(pc->clks[MTK_CLK_TOP]); + + return ret; +} + +static int mtk_pwm_remove(struct platform_device *pdev) +{ + struct mtk_pwm_chip *pc = platform_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < pc->chip.npwm; i++) + pwm_disable(&pc->chip.pwms[i]); + + return pwmchip_remove(&pc->chip); +} + +static const struct of_device_id mtk_pwm_of_match[] = { + { .compatible = "mediatek,mt7623-pwm" }, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_pwm_of_match); + +static struct platform_driver mtk_pwm_driver = { + .driver = { + .name = "mtk-pwm", + .of_match_table = mtk_pwm_of_match, + }, + .probe = mtk_pwm_probe, + .remove = mtk_pwm_remove, +}; +module_platform_driver(mtk_pwm_driver); + +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_ALIAS("platform:mtk-pwm"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 0cfb3571a732..5f55cfab9b1c 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -30,6 +30,7 @@ #include <linux/regmap.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> /* * Because the PCA9685 has only one prescaler per chip, changing the period of @@ -79,7 +80,6 @@ struct pca9685 { struct pwm_chip chip; struct regmap *regmap; - int active_cnt; int duty_ns; int period_ns; #if IS_ENABLED(CONFIG_GPIOLIB) @@ -111,20 +111,10 @@ static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) pwm_set_chip_data(pwm, (void *)1); mutex_unlock(&pca->lock); + pm_runtime_get_sync(pca->chip.dev); return 0; } -static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) -{ - struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm; - - mutex_lock(&pca->lock); - pwm = &pca->chip.pwms[offset]; - pwm_set_chip_data(pwm, NULL); - mutex_unlock(&pca->lock); -} - static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) { bool is_gpio = false; @@ -177,6 +167,19 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset, regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on); } +static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm; + + pca9685_pwm_gpio_set(gpio, offset, 0); + pm_runtime_put(pca->chip.dev); + mutex_lock(&pca->lock); + pwm = &pca->chip.pwms[offset]; + pwm_set_chip_data(pwm, NULL); + mutex_unlock(&pca->lock); +} + static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { @@ -238,6 +241,16 @@ static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) } #endif +static void pca9685_set_sleep_mode(struct pca9685 *pca, int sleep) +{ + regmap_update_bits(pca->regmap, PCA9685_MODE1, + MODE1_SLEEP, sleep ? 
MODE1_SLEEP : 0); + if (!sleep) { + /* Wait 500us for the oscillator to be back up */ + udelay(500); + } +} + static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { @@ -252,19 +265,20 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (prescale >= PCA9685_PRESCALE_MIN && prescale <= PCA9685_PRESCALE_MAX) { + /* + * putting the chip briefly into SLEEP mode + * at this point won't interfere with the + * pm_runtime framework, because the pm_runtime + * state is guaranteed active here. + */ /* Put chip into sleep mode */ - regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_SLEEP, MODE1_SLEEP); + pca9685_set_sleep_mode(pca, 1); /* Change the chip-wide output frequency */ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale); /* Wake the chip up */ - regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_SLEEP, 0x0); - - /* Wait 500us for the oscillator to be back up */ - udelay(500); + pca9685_set_sleep_mode(pca, 0); pca->period_ns = period_ns; } else { @@ -406,21 +420,15 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) if (pca9685_pwm_is_gpio(pca, pwm)) return -EBUSY; - - if (pca->active_cnt++ == 0) - return regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_SLEEP, 0x0); + pm_runtime_get_sync(chip->dev); return 0; } static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { - struct pca9685 *pca = to_pca(chip); - - if (--pca->active_cnt == 0) - regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP, - MODE1_SLEEP); + pca9685_pwm_disable(chip, pwm); + pm_runtime_put(chip->dev); } static const struct pwm_ops pca9685_pwm_ops = { @@ -492,22 +500,54 @@ static int pca9685_pwm_probe(struct i2c_client *client, return ret; ret = pca9685_pwm_gpio_probe(pca); - if (ret < 0) + if (ret < 0) { pwmchip_remove(&pca->chip); + return ret; + } + + /* the chip comes out of power-up in the active state */ + pm_runtime_set_active(&client->dev); + /* + * enable will put the chip into suspend, which is what we + * want as all outputs are disabled at this point + */ + pm_runtime_enable(&client->dev); - return ret; + return 0; } static int pca9685_pwm_remove(struct i2c_client *client) { struct pca9685 *pca = i2c_get_clientdata(client); + int ret; - regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP, - MODE1_SLEEP); + ret = pwmchip_remove(&pca->chip); + if (ret) + return ret; + pm_runtime_disable(&client->dev); + return 0; +} - return pwmchip_remove(&pca->chip); +#ifdef CONFIG_PM +static int pca9685_pwm_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pca9685 *pca = i2c_get_clientdata(client); + + pca9685_set_sleep_mode(pca, 1); + return 0; } +static int pca9685_pwm_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pca9685 *pca = i2c_get_clientdata(client); + + pca9685_set_sleep_mode(pca, 0); + return 0; +} +#endif + static const struct i2c_device_id pca9685_id[] = { { "pca9685", 0 }, { /* sentinel */ }, @@ -530,11 +570,17 @@ static const struct of_device_id pca9685_dt_ids[] = { MODULE_DEVICE_TABLE(of, pca9685_dt_ids); #endif +static const struct dev_pm_ops pca9685_pwm_pm = { + SET_RUNTIME_PM_OPS(pca9685_pwm_runtime_suspend, + pca9685_pwm_runtime_resume, NULL) +}; + static struct i2c_driver pca9685_i2c_driver = { .driver = { .name = "pca9685-pwm", .acpi_match_table = ACPI_PTR(pca9685_acpi_ids), .of_match_table = of_match_ptr(pca9685_dt_ids), + .pm = &pca9685_pwm_pm, }, 
.probe = pca9685_pwm_probe, .remove = pca9685_pwm_remove, diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index e4647840cd6e..8c6ed556db28 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -29,6 +29,7 @@ #include <linux/of_device.h> #include <linux/pwm.h> #include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> #include <linux/slab.h> #include <linux/reset.h> @@ -49,6 +50,8 @@ struct tegra_pwm_chip { struct clk *clk; struct reset_control*rst; + unsigned long clk_rate; + void __iomem *regs; const struct tegra_pwm_soc *soc; @@ -74,8 +77,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); - unsigned long long c = duty_ns; - unsigned long rate, hz; + unsigned long long c = duty_ns, hz; + unsigned long rate; u32 val = 0; int err; @@ -85,8 +88,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * nearest integer during division. */ c *= (1 << PWM_DUTY_WIDTH); - c += period_ns / 2; - do_div(c, period_ns); + c = DIV_ROUND_CLOSEST_ULL(c, period_ns); val = (u32)c << PWM_DUTY_SHIFT; @@ -94,10 +96,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * Compute the prescaler value for which (1 << PWM_DUTY_WIDTH) * cycles at the PWM clock rate will take period_ns nanoseconds. */ - rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; - hz = NSEC_PER_SEC / period_ns; + rate = pc->clk_rate >> PWM_DUTY_WIDTH; - rate = (rate + (hz / 2)) / hz; + /* Consider precision in PWM_SCALE_WIDTH rate calculation */ + hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns); + rate = DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz); /* * Since the actual PWM divider is the register's frequency divider @@ -198,6 +201,9 @@ static int tegra_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm->clk)) return PTR_ERR(pwm->clk); + /* Read PWM clock rate from source */ + pwm->clk_rate = clk_get_rate(pwm->clk); + pwm->rst = devm_reset_control_get(&pdev->dev, "pwm"); if (IS_ERR(pwm->rst)) { ret = PTR_ERR(pwm->rst); @@ -253,6 +259,18 @@ static int tegra_pwm_remove(struct platform_device *pdev) return pwmchip_remove(&pc->chip); } +#ifdef CONFIG_PM_SLEEP +static int tegra_pwm_suspend(struct device *dev) +{ + return pinctrl_pm_select_sleep_state(dev); +} + +static int tegra_pwm_resume(struct device *dev) +{ + return pinctrl_pm_select_default_state(dev); +} +#endif + static const struct tegra_pwm_soc tegra20_pwm_soc = { .num_channels = 4, }; @@ -269,10 +287,15 @@ static const struct of_device_id tegra_pwm_of_match[] = { MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); +static const struct dev_pm_ops tegra_pwm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume) +}; + static struct platform_driver tegra_pwm_driver = { .driver = { .name = "tegra-pwm", .of_match_table = tegra_pwm_of_match, + .pm = &tegra_pwm_pm_ops, }, .probe = tegra_pwm_probe, .remove = tegra_pwm_remove, diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 0142cc3f0c91..294634836b32 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(rproc_vq_interrupt); static struct virtqueue *rp_find_vq(struct virtio_device *vdev, unsigned int id, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); struct rproc *rproc = vdev_to_rproc(vdev); @@ -103,8 
+103,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, * Create the new vq, and tell virtio we're not interested in * the 'weak' smp barriers, since we're talking with a real device. */ - vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr, - rproc_virtio_notify, callback, name); + vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx, + addr, rproc_virtio_notify, callback, name); if (!vq) { dev_err(dev, "vring_new_virtqueue %s failed\n", name); rproc_free_vring(rvring); @@ -138,12 +138,14 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool * ctx, struct irq_affinity *desc) { int i, ret; for (i = 0; i < nvqs; ++i) { - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); goto error; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 5e66e081027e..f7cade09d38a 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -869,7 +869,7 @@ static int rpmsg_probe(struct virtio_device *vdev) init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ - err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); + err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index ee1b0e9dde79..8d3b95728326 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1303,10 +1303,10 @@ config RTC_DRV_SA1100 config RTC_DRV_SH tristate "SuperH On-Chip RTC" - depends on SUPERH && HAVE_CLK + depends on SUPERH || ARCH_RENESAS help Say Y here to enable support for the on-chip RTC found in - most SuperH processors. + most SuperH processors. This RTC is also found in RZ/A SoCs. To compile this driver as a module, choose M here: the module will be called rtc-sh. @@ -1731,6 +1731,13 @@ config RTC_DRV_STM32 This driver can also be built as a module, if so, the module will be called "rtc-stm32". +config RTC_DRV_CPCAP + depends on MFD_CPCAP + tristate "Motorola CPCAP RTC" + help + Say y here for CPCAP rtc found on some Motorola phones + and tablets such as Droid 4. 
+ comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index f07297b1460a..13857d2fce09 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o +obj-$(CONFIG_RTC_DRV_CPCAP) += rtc-cpcap.o obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o obj-$(CONFIG_RTC_DRV_DA9063) += rtc-da9063.o diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 2b223935001f..98ac8d5c7901 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -310,9 +310,16 @@ static const struct i2c_device_id bq32k_id[] = { }; MODULE_DEVICE_TABLE(i2c, bq32k_id); +static const struct of_device_id bq32k_of_match[] = { + { .compatible = "ti,bq32000" }, + { } +}; +MODULE_DEVICE_TABLE(of, bq32k_of_match); + static struct i2c_driver bq32k_driver = { .driver = { .name = "bq32k", + .of_match_table = of_match_ptr(bq32k_of_match), }, .probe = bq32k_probe, .remove = bq32k_remove, diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index f4a96dbdabf2..b3de973a6260 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -41,6 +41,9 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_platform.h> +#ifdef CONFIG_X86 +#include <asm/i8259.h> +#endif /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include <linux/mc146818rtc.h> @@ -1193,17 +1196,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { cmos_wake_setup(&pnp->dev); - if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) + if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { + unsigned int irq = 0; +#ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. It should always be safe to - * hardcode it in these cases + * hardcode it on systems with a legacy PIC. */ + if (nr_legacy_irqs()) + irq = 8; +#endif return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), 8); - else + pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); + } else { return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), pnp_irq(pnp, 0)); + } } static void cmos_pnp_remove(struct pnp_dev *pnp) diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c new file mode 100644 index 000000000000..3a0333e1f21a --- /dev/null +++ b/drivers/rtc/rtc-cpcap.c @@ -0,0 +1,330 @@ +/* + * Motorola CPCAP PMIC RTC driver + * + * Based on cpcap-regulator.c from Motorola Linux kernel tree + * Copyright (C) 2009 Motorola, Inc. + * + * Rewritten for mainline kernel + * - use DT + * - use regmap + * - use standard interrupt framework + * - use managed device resources + * - remove custom "secure clock daemon" helpers + * + * Copyright (C) 2017 Sebastian Reichel <sre@kernel.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/err.h> +#include <linux/regmap.h> +#include <linux/mfd/motorola-cpcap.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#define SECS_PER_DAY 86400 +#define DAY_MASK 0x7FFF +#define TOD1_MASK 0x00FF +#define TOD2_MASK 0x01FF + +struct cpcap_time { + int day; + int tod1; + int tod2; +}; + +struct cpcap_rtc { + struct regmap *regmap; + struct rtc_device *rtc_dev; + u16 vendor; + int alarm_irq; + bool alarm_enabled; + int update_irq; + bool update_enabled; +}; + +static void cpcap2rtc_time(struct rtc_time *rtc, struct cpcap_time *cpcap) +{ + unsigned long int tod; + unsigned long int time; + + tod = (cpcap->tod1 & TOD1_MASK) | ((cpcap->tod2 & TOD2_MASK) << 8); + time = tod + ((cpcap->day & DAY_MASK) * SECS_PER_DAY); + + rtc_time_to_tm(time, rtc); +} + +static void rtc2cpcap_time(struct cpcap_time *cpcap, struct rtc_time *rtc) +{ + unsigned long time; + + rtc_tm_to_time(rtc, &time); + + cpcap->day = time / SECS_PER_DAY; + time %= SECS_PER_DAY; + cpcap->tod2 = (time >> 8) & TOD2_MASK; + cpcap->tod1 = time & TOD1_MASK; +} + +static int cpcap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct cpcap_rtc *rtc = dev_get_drvdata(dev); + + if (rtc->alarm_enabled == enabled) + return 0; + + if (enabled) + enable_irq(rtc->alarm_irq); + else + disable_irq(rtc->alarm_irq); + + rtc->alarm_enabled = !!enabled; + + return 0; +} + +static int cpcap_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct cpcap_rtc *rtc; + struct cpcap_time cpcap_tm; + int temp_tod2; + int ret; + + rtc = dev_get_drvdata(dev); + + ret = regmap_read(rtc->regmap, CPCAP_REG_TOD2, &temp_tod2); + ret |= regmap_read(rtc->regmap, CPCAP_REG_DAY, &cpcap_tm.day); + ret |= regmap_read(rtc->regmap, CPCAP_REG_TOD1, &cpcap_tm.tod1); + ret |= regmap_read(rtc->regmap, CPCAP_REG_TOD2, &cpcap_tm.tod2); + + if (temp_tod2 > cpcap_tm.tod2) + ret |= regmap_read(rtc->regmap, CPCAP_REG_DAY, &cpcap_tm.day); + + if (ret) { + dev_err(dev, "Failed to read time\n"); + return -EIO; + } + + cpcap2rtc_time(tm, &cpcap_tm); + + return rtc_valid_tm(tm); +} + +static int cpcap_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct cpcap_rtc *rtc; + struct cpcap_time cpcap_tm; + int ret = 0; + + rtc = dev_get_drvdata(dev); + + rtc2cpcap_time(&cpcap_tm, tm); + + if (rtc->alarm_enabled) + disable_irq(rtc->alarm_irq); + if (rtc->update_enabled) + disable_irq(rtc->update_irq); + + if (rtc->vendor == CPCAP_VENDOR_ST) { + /* The TOD1 and TOD2 registers MUST be written in this order + * for the change to properly set. + */ + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1, + TOD1_MASK, cpcap_tm.tod1); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD2, + TOD2_MASK, cpcap_tm.tod2); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_DAY, + DAY_MASK, cpcap_tm.day); + } else { + /* Clearing the upper lower 8 bits of the TOD guarantees that + * the upper half of TOD (TOD2) will not increment for 0xFF RTC + * ticks (255 seconds). During this time we can safely write + * to DAY, TOD2, then TOD1 (in that order) and expect RTC to be + * synchronized to the exact time requested upon the final write + * to TOD1. 
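The DAY/TOD split used above is easiest to see as a pair of pure helpers. A self-contained round-trip sketch using the masks defined at the top of this file (pack/unpack are illustrative names standing in for rtc2cpcap_time/cpcap2rtc_time):

#include <assert.h>
#include <stdio.h>

#define SECS_PER_DAY	86400
#define DAY_MASK	0x7FFF
#define TOD1_MASK	0x00FF
#define TOD2_MASK	0x01FF

struct cpcap_time { int day, tod1, tod2; };

/* Seconds since the RTC epoch -> 15-bit DAY plus a 17-bit time of day,
 * split across TOD2 (high 9 bits) and TOD1 (low 8 bits). */
static void pack(long time, struct cpcap_time *c)
{
	c->day  = time / SECS_PER_DAY;
	time   %= SECS_PER_DAY;
	c->tod2 = (time >> 8) & TOD2_MASK;
	c->tod1 = time & TOD1_MASK;
}

static long unpack(const struct cpcap_time *c)
{
	long tod = (c->tod1 & TOD1_MASK) | ((c->tod2 & TOD2_MASK) << 8);

	return tod + (long)(c->day & DAY_MASK) * SECS_PER_DAY;
}

int main(void)
{
	struct cpcap_time c;
	long t = 1234567890;	/* arbitrary epoch-relative value */

	pack(t, &c);
	assert(unpack(&c) == t);
	printf("day=%d tod2=0x%03x tod1=0x%02x\n", c.day, c.tod2, c.tod1);
	return 0;
}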
+ */ + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1, + TOD1_MASK, 0); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_DAY, + DAY_MASK, cpcap_tm.day); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD2, + TOD2_MASK, cpcap_tm.tod2); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1, + TOD1_MASK, cpcap_tm.tod1); + } + + if (rtc->update_enabled) + enable_irq(rtc->update_irq); + if (rtc->alarm_enabled) + enable_irq(rtc->alarm_irq); + + return ret; +} + +static int cpcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct cpcap_rtc *rtc; + struct cpcap_time cpcap_tm; + int ret; + + rtc = dev_get_drvdata(dev); + + alrm->enabled = rtc->alarm_enabled; + + ret = regmap_read(rtc->regmap, CPCAP_REG_DAYA, &cpcap_tm.day); + ret |= regmap_read(rtc->regmap, CPCAP_REG_TODA2, &cpcap_tm.tod2); + ret |= regmap_read(rtc->regmap, CPCAP_REG_TODA1, &cpcap_tm.tod1); + + if (ret) { + dev_err(dev, "Failed to read time\n"); + return -EIO; + } + + cpcap2rtc_time(&alrm->time, &cpcap_tm); + return rtc_valid_tm(&alrm->time); +} + +static int cpcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct cpcap_rtc *rtc; + struct cpcap_time cpcap_tm; + int ret; + + rtc = dev_get_drvdata(dev); + + rtc2cpcap_time(&cpcap_tm, &alrm->time); + + if (rtc->alarm_enabled) + disable_irq(rtc->alarm_irq); + + ret = regmap_update_bits(rtc->regmap, CPCAP_REG_DAYA, DAY_MASK, + cpcap_tm.day); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TODA2, TOD2_MASK, + cpcap_tm.tod2); + ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TODA1, TOD1_MASK, + cpcap_tm.tod1); + + if (!ret) { + enable_irq(rtc->alarm_irq); + rtc->alarm_enabled = true; + } + + return ret; +} + +static const struct rtc_class_ops cpcap_rtc_ops = { + .read_time = cpcap_rtc_read_time, + .set_time = cpcap_rtc_set_time, + .read_alarm = cpcap_rtc_read_alarm, + .set_alarm = cpcap_rtc_set_alarm, + .alarm_irq_enable = cpcap_rtc_alarm_irq_enable, +}; + +static irqreturn_t cpcap_rtc_alarm_irq(int irq, void *data) +{ + struct cpcap_rtc *rtc = data; + + rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); + return IRQ_HANDLED; +} + +static irqreturn_t cpcap_rtc_update_irq(int irq, void *data) +{ + struct cpcap_rtc *rtc = data; + + rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); + return IRQ_HANDLED; +} + +static int cpcap_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cpcap_rtc *rtc; + int err; + + rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->regmap = dev_get_regmap(dev->parent, NULL); + if (!rtc->regmap) + return -ENODEV; + + platform_set_drvdata(pdev, rtc); + rtc->rtc_dev = devm_rtc_device_register(dev, "cpcap_rtc", + &cpcap_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + + err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor); + if (err) + return err; + + rtc->alarm_irq = platform_get_irq(pdev, 0); + err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL, + cpcap_rtc_alarm_irq, IRQF_TRIGGER_NONE, + "rtc_alarm", rtc); + if (err) { + dev_err(dev, "Could not request alarm irq: %d\n", err); + return err; + } + disable_irq(rtc->alarm_irq); + + /* Stock Android uses the 1 Hz interrupt for "secure clock daemon", + * which is not supported by the mainline kernel. The mainline kernel + * does not use the irq at the moment, but we explicitly request and + * disable it, so that its masked and does not wake up the processor + * every second. 
+ */ + rtc->update_irq = platform_get_irq(pdev, 1); + err = devm_request_threaded_irq(dev, rtc->update_irq, NULL, + cpcap_rtc_update_irq, IRQF_TRIGGER_NONE, + "rtc_1hz", rtc); + if (err) { + dev_err(dev, "Could not request update irq: %d\n", err); + return err; + } + disable_irq(rtc->update_irq); + + err = device_init_wakeup(dev, 1); + if (err) { + dev_err(dev, "wakeup initialization failed (%d)\n", err); + /* ignore error and continue without wakeup support */ + } + + return 0; +} + +static const struct of_device_id cpcap_rtc_of_match[] = { + { .compatible = "motorola,cpcap-rtc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cpcap_rtc_of_match); + +static struct platform_driver cpcap_rtc_driver = { + .probe = cpcap_rtc_probe, + .driver = { + .name = "cpcap-rtc", + .of_match_table = cpcap_rtc_of_match, + }, +}; + +module_platform_driver(cpcap_rtc_driver); + +MODULE_ALIAS("platform:cpcap-rtc"); +MODULE_DESCRIPTION("CPCAP RTC driver"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4ad97be48043..77339b3d50a1 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -16,6 +16,7 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/rtc/ds1307.h> #include <linux/rtc.h> #include <linux/slab.h> @@ -38,6 +39,7 @@ enum ds_type { ds_1340, ds_1388, ds_3231, + m41t0, m41t00, mcp794xx, rx_8025, @@ -52,6 +54,7 @@ enum ds_type { # define DS1340_BIT_nEOSC 0x80 # define MCP794XX_BIT_ST 0x80 #define DS1307_REG_MIN 0x01 /* 00-59 */ +# define M41T0_BIT_OF 0x80 #define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */ # define DS1307_BIT_12HR 0x40 /* in REG_HOUR */ # define DS1307_BIT_PM 0x20 /* in REG_HOUR */ @@ -182,6 +185,7 @@ static const struct i2c_device_id ds1307_id[] = { { "ds1388", ds_1388 }, { "ds1340", ds_1340 }, { "ds3231", ds_3231 }, + { "m41t0", m41t0 }, { "m41t00", m41t00 }, { "mcp7940x", mcp794xx }, { "mcp7941x", mcp794xx }, @@ -192,6 +196,69 @@ static const struct i2c_device_id ds1307_id[] = { }; MODULE_DEVICE_TABLE(i2c, ds1307_id); +#ifdef CONFIG_OF +static const struct of_device_id ds1307_of_match[] = { + { + .compatible = "dallas,ds1307", + .data = (void *)ds_1307 + }, + { + .compatible = "dallas,ds1337", + .data = (void *)ds_1337 + }, + { + .compatible = "dallas,ds1338", + .data = (void *)ds_1338 + }, + { + .compatible = "dallas,ds1339", + .data = (void *)ds_1339 + }, + { + .compatible = "dallas,ds1388", + .data = (void *)ds_1388 + }, + { + .compatible = "dallas,ds1340", + .data = (void *)ds_1340 + }, + { + .compatible = "maxim,ds3231", + .data = (void *)ds_3231 + }, + { + .compatible = "st,m41t0", + .data = (void *)m41t00 + }, + { + .compatible = "st,m41t00", + .data = (void *)m41t00 + }, + { + .compatible = "microchip,mcp7940x", + .data = (void *)mcp794xx + }, + { + .compatible = "microchip,mcp7941x", + .data = (void *)mcp794xx + }, + { + .compatible = "pericom,pt7c4338", + .data = (void *)ds_1307 + }, + { + .compatible = "epson,rx8025", + .data = (void *)rx_8025 + }, + { + .compatible = "isil,isl12057", + .data = (void *)ds_1337 + }, + { } +}; +MODULE_DEVICE_TABLE(of, ds1307_of_match); +#endif + #ifdef CONFIG_ACPI static const struct acpi_device_id ds1307_acpi_ids[] = { { .id = "DS1307", .driver_data = ds_1307 }, @@ -201,6 +268,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = { { .id = "DS1388", .driver_data = ds_1388 }, { .id = "DS1340", .driver_data = ds_1340 }, { .id = "DS3231", .driver_data = ds_3231 
}, + { .id = "M41T0", .driver_data = m41t0 }, { .id = "M41T00", .driver_data = m41t00 }, { .id = "MCP7940X", .driver_data = mcp794xx }, { .id = "MCP7941X", .driver_data = mcp794xx }, @@ -396,6 +464,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs); + /* if oscillator fail bit is set, no data can be trusted */ + if (ds1307->type == m41t0 && + ds1307->regs[DS1307_REG_MIN] & M41T0_BIT_OF) { + dev_warn_once(dev, "oscillator failed, set time!\n"); + return -EINVAL; + } + t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f); t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f); tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f; @@ -1318,7 +1393,12 @@ static int ds1307_probe(struct i2c_client *client, i2c_set_clientdata(client, ds1307); ds1307->client = client; - if (id) { + + if (client->dev.of_node) { + ds1307->type = (enum ds_type) + of_device_get_match_data(&client->dev); + chip = &chips[ds1307->type]; + } else if (id) { chip = &chips[id->driver_data]; ds1307->type = id->driver_data; } else { @@ -1513,6 +1593,7 @@ read_rtc: tmp = ds1307->regs[DS1307_REG_SECS]; switch (ds1307->type) { case ds_1307: + case m41t0: case m41t00: /* clock halted? turn it on, so clock can tick. */ if (tmp & DS1307_BIT_CH) { @@ -1577,6 +1658,7 @@ read_rtc: tmp = ds1307->regs[DS1307_REG_HOUR]; switch (ds1307->type) { case ds_1340: + case m41t0: case m41t00: /* * NOTE: ignores century bits; fix before deploying @@ -1711,6 +1793,7 @@ static int ds1307_remove(struct i2c_client *client) static struct i2c_driver ds1307_driver = { .driver = { .name = "rtc-ds1307", + .of_match_table = of_match_ptr(ds1307_of_match), .acpi_match_table = ACPI_PTR(ds1307_acpi_ids), }, .probe = ds1307_probe, diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 52429f0a57cc..38a2e9e684df 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -525,6 +525,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (get_user(new_margin, (int __user *)arg)) return -EFAULT; + /* the hardware's tick rate is 4096 Hz, so + * the counter value needs to be scaled accordingly + */ + new_margin <<= 12; if (new_margin < 1 || new_margin > 16777216) return -EINVAL; @@ -533,7 +537,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, ds1374_wdt_ping(); /* fallthrough */ case WDIOC_GETTIMEOUT: - return put_user(wdt_margin, (int __user *)arg); + /* when returning ... 
inverse is true */ + return put_user((wdt_margin >> 12), (int __user *)arg); case WDIOC_SETOPTIONS: if (copy_from_user(&options, (int __user *)arg, sizeof(int))) return -EFAULT; @@ -541,14 +546,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (options & WDIOS_DISABLECARD) { pr_info("disable watchdog\n"); ds1374_wdt_disable(); + return 0; } if (options & WDIOS_ENABLECARD) { pr_info("enable watchdog\n"); ds1374_wdt_settimeout(wdt_margin); ds1374_wdt_ping(); + return 0; } - return -EINVAL; } return -ENOTTY; @@ -704,6 +710,7 @@ static SIMPLE_DEV_PM_OPS(ds1374_pm, ds1374_suspend, ds1374_resume); static struct i2c_driver ds1374_driver = { .driver = { .name = "rtc-ds1374", + .of_match_table = of_match_ptr(ds1374_of_match), .pm = &ds1374_pm, }, .probe = ds1374_probe, diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index 5c18ac7394c4..7bf46bfe11a4 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c @@ -196,10 +196,17 @@ static struct i2c_device_id ds1672_id[] = { }; MODULE_DEVICE_TABLE(i2c, ds1672_id); +static const struct of_device_id ds1672_of_match[] = { + { .compatible = "dallas,ds1672" }, + { } +}; +MODULE_DEVICE_TABLE(of, ds1672_of_match); + static struct i2c_driver ds1672_driver = { .driver = { .name = "rtc-ds1672", - }, + .of_match_table = of_match_ptr(ds1672_of_match), + }, .probe = &ds1672_probe, .id_table = ds1672_id, }; diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 9bb39a06b994..deff431a37c4 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -442,9 +442,16 @@ static const struct i2c_device_id ds3232_id[] = { }; MODULE_DEVICE_TABLE(i2c, ds3232_id); +static const struct of_device_id ds3232_of_match[] = { + { .compatible = "dallas,ds3232" }, + { } +}; +MODULE_DEVICE_TABLE(of, ds3232_of_match); + static struct i2c_driver ds3232_driver = { .driver = { .name = "rtc-ds3232", + .of_match_table = of_match_ptr(ds3232_of_match), .pm = &ds3232_pm_ops, }, .probe = ds3232_i2c_probe, diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c index ccf0dbadb62d..5279390bb42d 100644 --- a/drivers/rtc/rtc-gemini.c +++ b/drivers/rtc/rtc-gemini.c @@ -139,6 +139,8 @@ static int gemini_rtc_probe(struct platform_device *pdev) rtc->rtc_base = devm_ioremap(dev, res->start, resource_size(res)); + if (!rtc->rtc_base) + return -ENOMEM; ret = devm_request_irq(dev, rtc->rtc_irq, gemini_rtc_interrupt, IRQF_SHARED, pdev->name, dev); diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c index c398f74234c6..2751dba850c6 100644 --- a/drivers/rtc/rtc-hid-sensor-time.c +++ b/drivers/rtc/rtc-hid-sensor-time.c @@ -291,9 +291,9 @@ static int hid_time_probe(struct platform_device *pdev) "hid-sensor-time", &hid_time_rtc_ops, THIS_MODULE); - if (IS_ERR_OR_NULL(time_state->rtc)) { + if (IS_ERR(time_state->rtc)) { hid_device_io_stop(hsdev->hdev); - ret = time_state->rtc ? 
PTR_ERR(time_state->rtc) : -ENODEV; + ret = PTR_ERR(time_state->rtc); time_state->rtc = NULL; dev_err(&pdev->dev, "rtc device register failed!\n"); goto err_rtc; diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 2893785f0eba..8dd299c6a1f3 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -687,10 +687,18 @@ static const struct i2c_device_id isl1208_id[] = { }; MODULE_DEVICE_TABLE(i2c, isl1208_id); +static const struct of_device_id isl1208_of_match[] = { + { .compatible = "isil,isl1208" }, + { .compatible = "isil,isl1218" }, + { } +}; +MODULE_DEVICE_TABLE(of, isl1208_of_match); + static struct i2c_driver isl1208_driver = { .driver = { - .name = "rtc-isl1208", - }, + .name = "rtc-isl1208", + .of_match_table = of_match_ptr(isl1208_of_match), + }, .probe = isl1208_probe, .remove = isl1208_remove, .id_table = isl1208_id, diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 58698d21c2c3..5ec4653022ff 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/mutex.h> @@ -86,8 +87,66 @@ static const struct i2c_device_id m41t80_id[] = { }; MODULE_DEVICE_TABLE(i2c, m41t80_id); +static const struct of_device_id m41t80_of_match[] = { + { + .compatible = "st,m41t62", + .data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT) + }, + { + .compatible = "st,m41t65", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_WD) + }, + { + .compatible = "st,m41t80", + .data = (void *)(M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t81", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t81s", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t82", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t83", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t84", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t85", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "st,m41t87", + .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ) + }, + { + .compatible = "microcrystal,rv4162", + .data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT) + }, + /* DT compatibility only, do not use compatibles below: */ + { + .compatible = "st,rv4162", + .data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT) + }, + { + .compatible = "rv4162", + .data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT) + }, + { } +}; +MODULE_DEVICE_TABLE(of, m41t80_of_match); + struct m41t80_data { - u8 features; + unsigned long features; struct rtc_device *rtc; }; @@ -786,7 +845,11 @@ static int m41t80_probe(struct i2c_client *client, if (!m41t80_data) return -ENOMEM; - m41t80_data->features = id->driver_data; + if (client->dev.of_node) + m41t80_data->features = (unsigned long) + of_device_get_match_data(&client->dev); + else + m41t80_data->features = id->driver_data; i2c_set_clientdata(client, m41t80_data); if (client->irq > 0) { @@ -894,6 +957,7 @@ static int m41t80_remove(struct i2c_client *client) static struct i2c_driver m41t80_driver = { .driver = { .name = 
"rtc-m41t80", + .of_match_table = of_match_ptr(m41t80_of_match), .pm = &m41t80_pm, }, .probe = m41t80_probe, diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 73594f38c453..13f7cd11c07e 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -844,7 +844,7 @@ err: return ret; } -static int __exit omap_rtc_remove(struct platform_device *pdev) +static int omap_rtc_remove(struct platform_device *pdev) { struct omap_rtc *rtc = platform_get_drvdata(pdev); u8 reg; @@ -882,8 +882,7 @@ static int __exit omap_rtc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int omap_rtc_suspend(struct device *dev) +static int __maybe_unused omap_rtc_suspend(struct device *dev) { struct omap_rtc *rtc = dev_get_drvdata(dev); @@ -906,7 +905,7 @@ static int omap_rtc_suspend(struct device *dev) return 0; } -static int omap_rtc_resume(struct device *dev) +static int __maybe_unused omap_rtc_resume(struct device *dev) { struct omap_rtc *rtc = dev_get_drvdata(dev); @@ -921,10 +920,8 @@ static int omap_rtc_resume(struct device *dev) return 0; } -#endif -#ifdef CONFIG_PM -static int omap_rtc_runtime_suspend(struct device *dev) +static int __maybe_unused omap_rtc_runtime_suspend(struct device *dev) { struct omap_rtc *rtc = dev_get_drvdata(dev); @@ -934,16 +931,9 @@ static int omap_rtc_runtime_suspend(struct device *dev) return 0; } -static int omap_rtc_runtime_resume(struct device *dev) -{ - return 0; -} -#endif - static const struct dev_pm_ops omap_rtc_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) - SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, - omap_rtc_runtime_resume, NULL) + SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, NULL, NULL) }; static void omap_rtc_shutdown(struct platform_device *pdev) @@ -964,7 +954,7 @@ static void omap_rtc_shutdown(struct platform_device *pdev) static struct platform_driver omap_rtc_driver = { .probe = omap_rtc_probe, - .remove = __exit_p(omap_rtc_remove), + .remove = omap_rtc_remove, .shutdown = omap_rtc_shutdown, .driver = { .name = "omap_rtc", diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index c8c757466783..d4eff8d7131f 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -15,6 +15,7 @@ #include <linux/bcd.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/of_device.h> /* * Ricoh has a family of I2C based RTCs, which differ only slightly from @@ -83,6 +84,35 @@ static const struct i2c_device_id rs5c372_id[] = { }; MODULE_DEVICE_TABLE(i2c, rs5c372_id); +static const struct of_device_id rs5c372_of_match[] = { + { + .compatible = "ricoh,r2025sd", + .data = (void *)rtc_r2025sd + }, + { + .compatible = "ricoh,r2221tl", + .data = (void *)rtc_r2221tl + }, + { + .compatible = "ricoh,rs5c372a", + .data = (void *)rtc_rs5c372a + }, + { + .compatible = "ricoh,rs5c372b", + .data = (void *)rtc_rs5c372b + }, + { + .compatible = "ricoh,rv5c386", + .data = (void *)rtc_rv5c386 + }, + { + .compatible = "ricoh,rv5c387a", + .data = (void *)rtc_rv5c387a + }, + { } +}; +MODULE_DEVICE_TABLE(of, rs5c372_of_match); + /* REVISIT: this assumes that: * - we're in the 21st century, so it's safe to ignore the century * bit for rv5c38[67] (REG_MONTH bit 7); @@ -581,7 +611,11 @@ static int rs5c372_probe(struct i2c_client *client, rs5c372->client = client; i2c_set_clientdata(client, rs5c372); - rs5c372->type = id->driver_data; + if (client->dev.of_node) + rs5c372->type = (enum rtc_type) + of_device_get_match_data(&client->dev); + else + rs5c372->type = id->driver_data; /* we read 
registers 0x0f then 0x00-0x0f; skip the first one */ rs5c372->regs = &rs5c372->buf[1]; @@ -673,6 +707,7 @@ static int rs5c372_remove(struct i2c_client *client) static struct i2c_driver rs5c372_driver = { .driver = { .name = "rtc-rs5c372", + .of_match_table = of_match_ptr(rs5c372_of_match), }, .probe = rs5c372_probe, .remove = rs5c372_remove, diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index 1f9f7b4bf3fb..85fa1da03762 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -875,9 +875,18 @@ static struct i2c_device_id rv3029_id[] = { }; MODULE_DEVICE_TABLE(i2c, rv3029_id); +static const struct of_device_id rv3029_of_match[] = { + { .compatible = "rv3029" }, + { .compatible = "rv3029c2" }, + { .compatible = "mc,rv3029c2" }, + { } +}; +MODULE_DEVICE_TABLE(of, rv3029_of_match); + static struct i2c_driver rv3029_driver = { .driver = { .name = "rtc-rv3029c2", + .of_match_table = of_match_ptr(rv3029_of_match), }, .probe = rv3029_i2c_probe, .id_table = rv3029_id, diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index f9277e536f7e..9ad97ab29866 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/rtc.h> #define RV8803_I2C_TRY_COUNT 4 @@ -556,7 +557,11 @@ static int rv8803_probe(struct i2c_client *client, mutex_init(&rv8803->flags_lock); rv8803->client = client; - rv8803->type = id->driver_data; + if (client->dev.of_node) + rv8803->type = (enum rv8803_type) + of_device_get_match_data(&client->dev); + else + rv8803->type = id->driver_data; i2c_set_clientdata(client, rv8803); flags = rv8803_read_reg(client, RV8803_FLAG); @@ -627,9 +632,23 @@ static const struct i2c_device_id rv8803_id[] = { }; MODULE_DEVICE_TABLE(i2c, rv8803_id); +static const struct of_device_id rv8803_of_match[] = { + { + .compatible = "microcrystal,rv8803", + .data = (void *)rx_8900 + }, + { + .compatible = "epson,rx8900", + .data = (void *)rx_8900 + }, + { } +}; +MODULE_DEVICE_TABLE(of, rv8803_of_match); + static struct i2c_driver rv8803_driver = { .driver = { .name = "rtc-rv8803", + .of_match_table = of_match_ptr(rv8803_of_match), }, .probe = rv8803_probe, .remove = rv8803_remove, diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index d08da371912c..1ed3403ff8ac 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -59,6 +59,12 @@ static const struct i2c_device_id rx8010_id[] = { }; MODULE_DEVICE_TABLE(i2c, rx8010_id); +static const struct of_device_id rx8010_of_match[] = { + { .compatible = "epson,rx8010" }, + { } +}; +MODULE_DEVICE_TABLE(of, rx8010_of_match); + struct rx8010_data { struct i2c_client *client; struct rtc_device *rtc; @@ -487,6 +493,7 @@ static int rx8010_probe(struct i2c_client *client, static struct i2c_driver rx8010_driver = { .driver = { .name = "rtc-rx8010", + .of_match_table = of_match_ptr(rx8010_of_match), }, .probe = rx8010_probe, .id_table = rx8010_id, diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c index 0c362a3d1f17..9998d7937688 100644 --- a/drivers/rtc/rtc-rx8581.c +++ b/drivers/rtc/rtc-rx8581.c @@ -308,9 +308,16 @@ static const struct i2c_device_id rx8581_id[] = { }; MODULE_DEVICE_TABLE(i2c, rx8581_id); +static const struct of_device_id rx8581_of_match[] = { + { .compatible = "epson,rx8581" }, + { } +}; +MODULE_DEVICE_TABLE(of, rx8581_of_match); + static struct i2c_driver rx8581_driver = { .driver = { .name = "rtc-rx8581", + 
.of_match_table = of_match_ptr(rx8581_of_match), }, .probe = rx8581_probe, .id_table = rx8581_id, diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 5dab4665ca3b..449820eeefe8 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -58,6 +58,13 @@ static const struct i2c_device_id s35390a_id[] = { }; MODULE_DEVICE_TABLE(i2c, s35390a_id); +static const struct of_device_id s35390a_of_match[] = { + { .compatible = "s35390a" }, + { .compatible = "sii,s35390a" }, + { } +}; +MODULE_DEVICE_TABLE(of, s35390a_of_match); + struct s35390a { struct i2c_client *client[8]; struct rtc_device *rtc; @@ -502,6 +509,7 @@ static int s35390a_remove(struct i2c_client *client) static struct i2c_driver s35390a_driver = { .driver = { .name = "rtc-s35390a", + .of_match_table = of_match_ptr(s35390a_of_match), }, .probe = s35390a_probe, .remove = s35390a_remove, diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index c626e43a9cbb..6c2d3989f967 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -27,7 +27,15 @@ #include <linux/log2.h> #include <linux/clk.h> #include <linux/slab.h> +#ifdef CONFIG_SUPERH #include <asm/rtc.h> +#else +/* Default values for RZ/A RTC */ +#define rtc_reg_size sizeof(u16) +#define RTC_BIT_INVERTED 0 /* no chip bugs */ +#define RTC_CAP_4_DIGIT_YEAR (1 << 0) +#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR +#endif #define DRV_NAME "sh-rtc" @@ -570,6 +578,8 @@ static int __init sh_rtc_probe(struct platform_device *pdev) rtc->alarm_irq = platform_get_irq(pdev, 2); res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(res == NULL)) { dev_err(&pdev->dev, "No IO resource\n"); return -ENOENT; @@ -587,12 +597,15 @@ static int __init sh_rtc_probe(struct platform_device *pdev) if (unlikely(!rtc->regbase)) return -EINVAL; - clk_id = pdev->id; - /* With a single device, the clock id is still "rtc0" */ - if (clk_id < 0) - clk_id = 0; + if (!pdev->dev.of_node) { + clk_id = pdev->id; + /* With a single device, the clock id is still "rtc0" */ + if (clk_id < 0) + clk_id = 0; - snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id); + snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id); + } else + snprintf(clk_name, sizeof(clk_name), "fck"); rtc->clk = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(rtc->clk)) { @@ -608,6 +621,8 @@ static int __init sh_rtc_probe(struct platform_device *pdev) clk_enable(rtc->clk); rtc->capabilities = RTC_DEF_CAPABILITIES; + +#ifdef CONFIG_SUPERH if (dev_get_platdata(&pdev->dev)) { struct sh_rtc_platform_info *pinfo = dev_get_platdata(&pdev->dev); @@ -618,6 +633,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) */ rtc->capabilities |= pinfo->capabilities; } +#endif if (rtc->carry_irq <= 0) { /* register shared periodic/carry/alarm irq */ @@ -718,8 +734,7 @@ static void sh_rtc_set_irq_wake(struct device *dev, int enabled) } } -#ifdef CONFIG_PM_SLEEP -static int sh_rtc_suspend(struct device *dev) +static int __maybe_unused sh_rtc_suspend(struct device *dev) { if (device_may_wakeup(dev)) sh_rtc_set_irq_wake(dev, 1); @@ -727,21 +742,27 @@ static int sh_rtc_suspend(struct device *dev) return 0; } -static int sh_rtc_resume(struct device *dev) +static int __maybe_unused sh_rtc_resume(struct device *dev) { if (device_may_wakeup(dev)) sh_rtc_set_irq_wake(dev, 0); return 0; } -#endif static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume); +static const struct of_device_id sh_rtc_of_match[] = { + { .compatible = 
"renesas,sh-rtc", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sh_rtc_of_match); + static struct platform_driver sh_rtc_platform_driver = { .driver = { .name = DRV_NAME, .pm = &sh_rtc_pm_ops, + .of_match_table = sh_rtc_of_match, }, .remove = __exit_p(sh_rtc_remove), }; diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index d51b07d620f7..d8ef9e052c4f 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -258,7 +258,7 @@ static int snvs_rtc_probe(struct platform_device *pdev) of_property_read_u32(pdev->dev.of_node, "offset", &data->offset); } - if (!data->regmap) { + if (IS_ERR(data->regmap)) { dev_err(&pdev->dev, "Can't find snvs syscon\n"); return -ENODEV; } diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c index fa247deb9cf4..483c7993516b 100644 --- a/drivers/rtc/rtc-wm8350.c +++ b/drivers/rtc/rtc-wm8350.c @@ -30,8 +30,6 @@ #define WM8350_SET_TIME_RETRIES 5 #define WM8350_GET_TIME_RETRIES 5 -#define to_wm8350_from_rtc_dev(d) container_of(d, struct wm8350, rtc.pdev.dev) - /* * Read current time and date in RTC */ diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 2ce0b3eb2efe..a99d09a11f05 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -189,7 +189,7 @@ static bool kvm_notify(struct virtqueue *vq) static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; @@ -211,7 +211,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, goto out; vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, - vdev, true, (void *) config->address, + vdev, true, ctx, (void *) config->address, kvm_notify, callback, name); if (!vq) { err = -ENOMEM; @@ -256,6 +256,7 @@ static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct kvm_device *kdev = to_kvmdev(vdev); @@ -266,7 +267,8 @@ static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return -ENOENT; for (i = 0; i < nvqs; ++i) { - vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) goto error; } diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 0ed209f3d8b0..2a76ea78a0bf 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -484,7 +484,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev) static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int i, vq_callback_t *callback, - const char *name, + const char *name, bool ctx, struct ccw1 *ccw) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); @@ -522,7 +522,7 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, } vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, - true, info->queue, virtio_ccw_kvm_notify, + true, ctx, info->queue, virtio_ccw_kvm_notify, callback, name); if (!vq) { /* For now, we fail if we can't get the requested size. 
*/ @@ -629,6 +629,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); @@ -642,7 +643,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, for (i = 0; i < nvqs; ++i) { vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], - ccw); + ctx ? ctx[i] : false, ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index f44d0487236e..ce5dc73d85bb 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -331,11 +331,11 @@ MODULE_LICENSE("GPL"); #if !defined(PCMCIA) #if defined(MODULE) static int io[] = {0, 0}; -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io,"base io address of controller"); static int irq[] = {0, 0}; -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq,"interrupt for controller"); static int scsiid[] = {7, 7}; diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 7db448ec8beb..a23cc9ac5acd 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -31,7 +31,7 @@ module_param(isapnp, bool, 0); MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)"); static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 }; -module_param_array(io, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)"); /* time AHA spends on the AT-bus during data transfer */ diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 67c8dac321ad..c34fc91ba486 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -85,8 +85,8 @@ static int ncr_53c400; static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; -module_param(ncr_irq, int, 0); -module_param(ncr_addr, int, 0); +module_param_hw(ncr_irq, int, irq, 0); +module_param_hw(ncr_addr, int, ioport, 0); module_param(ncr_5380, int, 0); module_param(ncr_53c400, int, 0); module_param(ncr_53c400a, int, 0); @@ -94,11 +94,11 @@ module_param(dtc_3181e, int, 0); module_param(hp_c2502, int, 0); static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])"); static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; -module_param_array(base, int, NULL, 0); +module_param_hw_array(base, int, ioport, NULL, 0); MODULE_PARM_DESC(base, "base address(es)"); static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index d020a13646ae..facc7271f932 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -353,7 +353,7 @@ static int probe_eisa_isa = 0; static int force_dma32 = 0; /* parameters for modprobe/insmod */ -module_param_array(irq, int, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); module_param(disable, int, 0); module_param(reserve_mode, int, 0); module_param_array(reserve_list, int, NULL, 0); diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c index 61cac87fb86f..840823b99e51 100644 --- a/drivers/scsi/qlogicfas.c +++ b/drivers/scsi/qlogicfas.c @@ -137,8 +137,8 @@ err: static struct qlogicfas408_priv *cards; static int iobase[MAX_QLOGICFAS]; static 
int irq[MAX_QLOGICFAS] = { [0 ... MAX_QLOGICFAS-1] = -1 }; -module_param_array(iobase, int, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(iobase, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(iobase, "I/O address"); MODULE_PARM_DESC(irq, "IRQ"); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index a29d068b7696..f8dbfeee6c63 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -894,8 +894,7 @@ static int virtscsi_init(struct virtio_device *vdev, } /* Discover virtqueues and write information to configuration. */ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, - &desc); + err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) goto out; diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 22725bdc6f15..5fe9faf6232e 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -33,6 +33,7 @@ #include "dpaa_sys.h" #include <soc/fsl/qman.h> +#include <linux/dma-mapping.h> #include <linux/iommu.h> #if defined(CONFIG_FSL_PAMU) diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index 6f9d540a97ce..fff930fc3cff 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -1115,7 +1115,7 @@ int ldlm_init(void) ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, SLAB_HWCACHE_ALIGN | - SLAB_DESTROY_BY_RCU, NULL); + SLAB_TYPESAFE_BY_RCU, NULL); if (!ldlm_lock_slab) { kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c index ad72f8e883fc..a041441766aa 100644 --- a/drivers/staging/speakup/speakup_acntpc.c +++ b/drivers/staging/speakup/speakup_acntpc.c @@ -310,7 +310,7 @@ static void accent_release(void) speakup_info.port_tts = 0; } -module_param_named(port, port_forced, int, 0444); +module_param_hw_named(port, port_forced, int, ioport, 0444); module_param_named(start, synth_acntpc.startup, short, 0444); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c index 5973acc0a006..33180937222d 100644 --- a/drivers/staging/speakup/speakup_dtlk.c +++ b/drivers/staging/speakup/speakup_dtlk.c @@ -382,7 +382,7 @@ static void dtlk_release(void) speakup_info.port_tts = 0; } -module_param_named(port, port_forced, int, 0444); +module_param_hw_named(port, port_forced, int, ioport, 0444); module_param_named(start, synth_dtlk.startup, short, 0444); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c index ba7901178e0b..d3203f8fc3d0 100644 --- a/drivers/staging/speakup/speakup_keypc.c +++ b/drivers/staging/speakup/speakup_keypc.c @@ -312,7 +312,7 @@ static void keynote_release(void) synth_port = 0; } -module_param_named(port, port_forced, int, 0444); +module_param_hw_named(port, port_forced, int, ioport, 0444); module_param_named(start, synth_keypc.startup, short, 0444); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c index 20a2d835fdaa..367535b4b77f 100644 --- 
a/drivers/staging/vme/devices/vme_pio2_core.c +++ b/drivers/staging/vme/devices/vme_pio2_core.c @@ -466,16 +466,16 @@ static void __exit pio2_exit(void) /* These are required for each board */ MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected"); -module_param_array(bus, int, &bus_num, 0444); +module_param_hw_array(bus, int, other, &bus_num, 0444); MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers"); -module_param_array(base, long, &base_num, 0444); +module_param_hw_array(base, long, other, &base_num, 0444); MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)"); -module_param_array(vector, int, &vector_num, 0444); +module_param_hw_array(vector, int, other, &vector_num, 0444); MODULE_PARM_DESC(level, "VME IRQ Level"); -module_param_array(level, int, &level_num, 0444); +module_param_hw_array(level, int, other, &level_num, 0444); MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant"); module_param_array(variant, charp, &variant_num, 0444); diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig new file mode 100644 index 000000000000..2330a4eb4e8b --- /dev/null +++ b/drivers/tee/Kconfig @@ -0,0 +1,18 @@ +# Generic Trusted Execution Environment Configuration +config TEE + tristate "Trusted Execution Environment support" + select DMA_SHARED_BUFFER + select GENERIC_ALLOCATOR + help + This implements a generic interface towards a Trusted Execution + Environment (TEE). + +if TEE + +menu "TEE drivers" + +source "drivers/tee/optee/Kconfig" + +endmenu + +endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile new file mode 100644 index 000000000000..7a4e4a1ac39c --- /dev/null +++ b/drivers/tee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_TEE) += tee.o +tee-objs += tee_core.o +tee-objs += tee_shm.o +tee-objs += tee_shm_pool.o +obj-$(CONFIG_OPTEE) += optee/ diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig new file mode 100644 index 000000000000..0126de898036 --- /dev/null +++ b/drivers/tee/optee/Kconfig @@ -0,0 +1,7 @@ +# OP-TEE Trusted Execution Environment Configuration +config OPTEE + tristate "OP-TEE" + depends on HAVE_ARM_SMCCC + help + This implements the OP-TEE Trusted Execution Environment (TEE) + driver. diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile new file mode 100644 index 000000000000..92fe5789bcce --- /dev/null +++ b/drivers/tee/optee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_OPTEE) += optee.o +optee-objs += core.o +optee-objs += call.o +optee-objs += rpc.o +optee-objs += supp.o diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c new file mode 100644 index 000000000000..f7b7b404c990 --- /dev/null +++ b/drivers/tee/optee/call.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
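[Editor's note] The SCSI, speakup and VME hunks above all convert module_param()/module_param_array() uses to the module_param_hw*() variants, which additionally record what kind of hardware resource (ioport, irq, or other) a parameter describes. A minimal sketch of the converted form, using hypothetical parameter names rather than ones taken from the drivers above:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* I/O port bases: tagged with the "ioport" hardware type */
static int io[2];
module_param_hw_array(io, int, ioport, NULL, 0444);
MODULE_PARM_DESC(io, "base I/O address(es) of the controller");

/* Interrupt line: tagged with the "irq" hardware type */
static int irq = -1;
module_param_hw(irq, int, irq, 0444);
MODULE_PARM_DESC(irq, "IRQ number (-1 = probe)");

MODULE_LICENSE("GPL");

Parameters parse and behave exactly as with the plain module_param*() macros; the extra hwtype argument only annotates them as referring to hardware resources.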
+ * + */ +#include <linux/arm-smccc.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +static void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. + */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +static void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. The thread in secure + * world that was used for this call is now available for some + * other task to use. + */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. + */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} + +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) +{ + struct optee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; + + return NULL; +} + +/** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Does and SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. 
+ * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + u32 ret; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, + param.a4, param.a5, param.a6, param.a7, + &res); + + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { + /* + * Out of threads in secure world, wait for a thread + * become available. + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; + param.a3 = res.a3; + optee_handle_rpc(ctx, ¶m); + } else { + ret = res.a0; + break; + } + } + + /* + * We're done with our thread in secure world, if there's any + * thread waiters wake up one. + */ + optee_cq_wait_final(&optee->call_queue, &w); + + return ret; +} + +static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg, + phys_addr_t *msg_parg) +{ + int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + + shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), + TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { + rc = PTR_ERR(ma); + goto out; + } + + rc = tee_shm_get_pa(shm, 0, msg_parg); + if (rc) + goto out; + + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +out: + if (rc) { + tee_shm_free(shm); + return ERR_PTR(rc); + } + + return shm; +} + +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. + */ + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) + goto out; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + rc = -ENOMEM; + goto out; + } + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (msg_arg->ret == TEEC_SUCCESS) { + /* A new session has been created, add it to the list. 
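[Editor's note] optee_do_call_with_arg() above splits the physical address of the message buffer into two 32-bit SMC arguments with reg_pair_from_64(), and optee_disable_shm_cache() further down reassembles a pointer with reg_pair_to_ptr(). Both helpers are declared in optee_private.h rather than in this file; a plausible definition, consistent with how they are called here (treat the exact form as an assumption, not a quote from the header):

#include <linux/types.h>

/* Assumed helpers (see optee_private.h): pack/unpack a 64-bit value
 * into/from two 32-bit SMC registers, upper half first. */
static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
{
	*reg0 = val >> 32;
	*reg1 = val;
}

static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
{
	return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
}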
*/ + sess->session_id = msg_arg->session; + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + } else { + kfree(sess); + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ + optee_close_session(ctx, msg_arg->session); + } else { + arg->session = msg_arg->session; + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; + } +out: + tee_shm_free(shm); + + return rc; +} + +int optee_close_session(struct tee_context *ctx, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + if (sess) + list_del(&sess->list_node); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + kfree(sess); + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, arg->session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + msg_arg->func = arg->func; + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); + if (rc) + goto out; + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; +out: + tee_shm_free(shm); + return rc; +} + +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +/** + * optee_enable_shm_cache() - Enables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_enable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. 
*/ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res); + if (res.a0 == OPTEE_SMC_RETURN_OK) + break; + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * optee_disable_shm_cache() - Disables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + union { + struct arm_smccc_res smccc; + struct optee_smc_disable_shm_cache_result result; + } res; + + optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res.smccc); + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) + break; /* All shm's freed */ + if (res.result.status == OPTEE_SMC_RETURN_OK) { + struct tee_shm *shm; + + shm = reg_pair_to_ptr(res.result.shm_upper32, + res.result.shm_lower32); + tee_shm_free(shm); + } else { + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + } + optee_cq_wait_final(&optee->call_queue, &w); +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c new file mode 100644 index 000000000000..58169e519422 --- /dev/null +++ b/drivers/tee/optee/core.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/arm-smccc.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +#define DRIVER_NAME "optee" + +#define OPTEE_SHM_NUM_PRIV_PAGES 1 + +/** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params) +{ + int rc; + size_t n; + struct tee_shm *shm; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + const struct optee_msg_param *mp = msg_params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NONE: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&p->u, 0, sizeof(p->u)); + break; + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + p->u.value.a = mp->u.value.a; + p->u.value.b = mp->u.value.b; + p->u.value.c = mp->u.value.c; + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + p->u.memref.size = mp->u.tmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.tmem.shm_ref; + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + rc = tee_shm_get_pa(shm, 0, &pa); + if (rc) + return rc; + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; + p->u.memref.shm = shm; + + /* Check that the memref is covered by the shm object */ + if (p->u.memref.size) { + size_t o = p->u.memref.shm_offs + + p->u.memref.size - 1; + + rc = tee_shm_get_pa(shm, o, NULL); + if (rc) + return rc; + } + break; + default: + return -EINVAL; + } + } + return 0; +} + +/** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem itnernal parameter representation + * Returns 0 on success or <0 on failure + */ +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params) +{ + int rc; + size_t n; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + const struct tee_param *p = params + n; + struct optee_msg_param *mp = msg_params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&mp->u, 0, sizeof(mp->u)); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + mp->u.value.a = p->u.value.a; + mp->u.value.b = p->u.value.b; + mp->u.value.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case 
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; + mp->u.tmem.size = p->u.memref.size; + if (!p->u.memref.shm) { + mp->u.tmem.buf_ptr = 0; + break; + } + rc = tee_shm_get_pa(p->u.memref.shm, + p->u.memref.shm_offs, &pa); + if (rc) + return rc; + mp->u.tmem.buf_ptr = pa; + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << + OPTEE_MSG_ATTR_CACHE_SHIFT; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static void optee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_OPTEE, + .impl_caps = TEE_OPTEE_CAP_TZ, + .gen_caps = TEE_GEN_CAP_GP, + }; + *vers = v; +} + +static int optee_open(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + if (teedev == optee->supp_teedev) { + bool busy = true; + + mutex_lock(&optee->supp.ctx_mutex); + if (!optee->supp.ctx) { + busy = false; + optee->supp.ctx = ctx; + } + mutex_unlock(&optee->supp.ctx_mutex); + if (busy) { + kfree(ctxdata); + return -EBUSY; + } + } + + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + + ctx->data = ctxdata; + return 0; +} + +static void optee_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + struct optee_msg_arg *arg = NULL; + phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + if (!IS_ERR(shm)) { + arg = tee_shm_get_va(shm, 0); + /* + * If va2pa fails for some reason, we can't call + * optee_close_session(), only free the memory. Secure OS + * will leak sessions and finally refuse more sessions, but + * we will at least let normal world reclaim its memory. 
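[Editor's note] optee_from_msg_param() and optee_to_msg_param() above translate between the TEE subsystem's struct tee_param and the OP-TEE message format. For reference, this is roughly what a caller-side value parameter looks like before it is handed to optee_to_msg_param(); the helper name is illustrative, only the field accesses mirror the conversion code above:

#include <linux/tee_drv.h>
#include <linux/types.h>

/* Illustrative only: fill one in/out value parameter in the subsystem's
 * internal representation, as consumed by optee_to_msg_param() above. */
static void example_fill_value_param(struct tee_param *p, u64 a, u64 b)
{
	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	p->u.value.a = a;
	p->u.value.b = b;
	p->u.value.c = 0;
}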
+ */ + if (!IS_ERR(arg)) + tee_shm_va2pa(shm, arg, &parg); + } + + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); + if (!IS_ERR_OR_NULL(arg)) { + memset(arg, 0, sizeof(*arg)); + arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + arg->session = sess->session_id; + optee_do_call_with_arg(ctx, parg); + } + kfree(sess); + } + kfree(ctxdata); + + if (!IS_ERR(shm)) + tee_shm_free(shm); + + ctx->data = NULL; + + if (teedev == optee->supp_teedev) { + mutex_lock(&optee->supp.ctx_mutex); + optee->supp.ctx = NULL; + mutex_unlock(&optee->supp.ctx_mutex); + } +} + +static struct tee_driver_ops optee_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .open_session = optee_open_session, + .close_session = optee_close_session, + .invoke_func = optee_invoke_func, + .cancel_req = optee_cancel_req, +}; + +static struct tee_desc optee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &optee_ops, + .owner = THIS_MODULE, +}; + +static struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, +}; + +static struct tee_desc optee_supp_desc = { + .name = DRIVER_NAME "-supp", + .ops = &optee_supp_ops, + .owner = THIS_MODULE, + .flags = TEE_DESC_PRIVILEGED, +}; + +static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) +{ + struct arm_smccc_res res; + + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) + return true; + return false; +} + +static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_calls_revision_result result; + } res; + + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) + return true; + return false; +} + +static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, + u32 *sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_exchange_capabilities_result result; + } res; + u32 a1 = 0; + + /* + * TODO This isn't enough to tell if it's UP system (from kernel + * point of view) or not, is_smp() returns the the information + * needed, but can't be called directly from here. 
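[Editor's note] Several of the helpers in this file read SMC results through a union of struct arm_smccc_res and a per-call result struct from optee_smc.h, which is not included in this section. The convention they rely on is that the result struct mirrors the a0..a3 register layout, so the same memory can be read either generically or by field name; a hypothetical example (the field names are assumptions, not taken from optee_smc.h):

#include <linux/arm-smccc.h>

/* Assumed to mirror struct arm_smccc_res member for member. */
struct example_smc_result {
	unsigned long status;		/* overlays a0 */
	unsigned long capabilities;	/* overlays a1 */
	unsigned long reserved0;	/* overlays a2 */
	unsigned long reserved1;	/* overlays a3 */
};

union example_smc_res {
	struct arm_smccc_res smccc;
	struct example_smc_result result;
};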
+ */ + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; + + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.status != OPTEE_SMC_RETURN_OK) + return false; + + *sec_caps = res.result.capabilities; + return true; +} + +static struct tee_shm_pool * +optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_get_shm_config_result result; + } res; + struct tee_shm_pool *pool; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + struct tee_shm_pool_mem_info priv_info; + struct tee_shm_pool_mem_info dmabuf_info; + + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) { + pr_info("shm service not available\n"); + return ERR_PTR(-ENOENT); + } + + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { + pr_err("only normal cached shared memory supported\n"); + return ERR_PTR(-EINVAL); + } + + begin = roundup(res.result.start, PAGE_SIZE); + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); + paddr = begin; + size = end - begin; + + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { + pr_err("too small shared memory area\n"); + return ERR_PTR(-EINVAL); + } + + va = memremap(paddr, size, MEMREMAP_WB); + if (!va) { + pr_err("shared memory ioremap failed\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + + priv_info.vaddr = vaddr; + priv_info.paddr = paddr; + priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + + pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info); + if (IS_ERR(pool)) { + memunmap(va); + goto out; + } + + *memremaped_shm = va; +out: + return pool; +} + +/* Simple wrapper functions to be able to use a function pointer */ +static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static optee_invoke_fn *get_invoke_func(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp("hvc", method)) + return optee_smccc_hvc; + else if (!strcmp("smc", method)) + return optee_smccc_smc; + + pr_warn("invalid \"method\" property: %s\n", method); + return ERR_PTR(-EINVAL); +} + +static struct optee *optee_probe(struct device_node *np) +{ + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; + struct optee *optee = NULL; + void *memremaped_shm = NULL; + struct tee_device *teedev; + u32 sec_caps; + int rc; + + invoke_fn = get_invoke_func(np); + if (IS_ERR(invoke_fn)) + return (void *)invoke_fn; + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); + return ERR_PTR(-EINVAL); 
+ } + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); + return ERR_PTR(-EINVAL); + } + + /* + * We have no other option for shared memory, if secure world + * doesn't have any reserved memory we can use we can't continue. + */ + if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + return ERR_PTR(-EINVAL); + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); + if (IS_ERR(pool)) + return (void *)pool; + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { + rc = -ENOMEM; + goto err; + } + + optee->invoke_fn = invoke_fn; + + teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->teedev = teedev; + + teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->supp_teedev = teedev; + + rc = tee_device_register(optee->teedev); + if (rc) + goto err; + + rc = tee_device_register(optee->supp_teedev); + if (rc) + goto err; + + mutex_init(&optee->call_queue.mutex); + INIT_LIST_HEAD(&optee->call_queue.waiters); + optee_wait_queue_init(&optee->wait_queue); + optee_supp_init(&optee->supp); + optee->memremaped_shm = memremaped_shm; + optee->pool = pool; + + optee_enable_shm_cache(optee); + + pr_info("initialized driver\n"); + return optee; +err: + if (optee) { + /* + * tee_device_unregister() is safe to call even if the + * devices hasn't been registered with + * tee_device_register() yet. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + kfree(optee); + } + if (pool) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); + return ERR_PTR(rc); +} + +static void optee_remove(struct optee *optee) +{ + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices has to be unregistered before we can free the + * other resources. 
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); +} + +static const struct of_device_id optee_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, +}; + +static struct optee *optee_svc; + +static int __init optee_driver_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + struct optee *optee; + + /* Node is supposed to be below /firmware */ + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, optee_match); + of_node_put(fw_np); + if (!np) + return -ENODEV; + + optee = optee_probe(np); + of_node_put(np); + + if (IS_ERR(optee)) + return PTR_ERR(optee); + + optee_svc = optee; + + return 0; +} +module_init(optee_driver_init); + +static void __exit optee_driver_exit(void) +{ + struct optee *optee = optee_svc; + + optee_svc = NULL; + if (optee) + optee_remove(optee); +} +module_exit(optee_driver_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("OP-TEE driver"); +MODULE_SUPPORTED_DEVICE(""); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h new file mode 100644 index 000000000000..dd7a06ee0462 --- /dev/null +++ b/drivers/tee/optee/optee_msg.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include <linux/bitops.h> +#include <linux/types.h> + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into three sections. + * 1. Formatting of messages. + * 2. Requests from normal world + * 3. Requests from secure world, Remote Procedure Call (RPC), handled by + * tee-supplicant. 
+ */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8) + +/* + * The temporary shared memory object is not physically contigous and this + * temp memref is followed by another fragment until the last temp memref + * that doesn't have this bit set. + */ +#define OPTEE_MSG_ATTR_FRAGMENT BIT(9) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. + */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + u64 buf_ptr; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + u64 offs; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_value - opaque value parameter + * + * Value parameters are passed unchecked between normal and secure world. 
+ */ +struct optee_msg_param_value { + u64 a; + u64 b; + u64 c; +}; + +/** + * struct optee_msg_param - parameter used together with struct optee_msg_arg + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + u64 attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these field holds it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta has to come first. + * + * Temp memref parameters can be fragmented if supported by the Trusted OS + * (when optee_smc.h is bearer of this protocol this is indicated with + * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is + * fragmented then has all but the last fragment the + * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented + * it will still be presented as a single logical memref to the Trusted + * Application. + */ +struct optee_msg_arg { + u32 cmd; + u32 func; + u32 session; + u32 cancel_id; + u32 pad; + u32 ret; + u32 ret_origin; + u32 num_params; + + /* num_params tells the actual number of element in params */ + struct optee_msg_param params[0]; +}; + +/** + * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg + * + * @num_params: Number of parameters embedded in the struct optee_msg_arg + * + * Returns the size of the struct optee_msg_arg together with the number + * of embedded parameters. + */ +#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ + (sizeof(struct optee_msg_arg) + \ + sizeof(struct optee_msg_param) * (num_params)) + +/***************************************************************************** + * Part 2 - requests from normal world + *****************************************************************************/ + +/* + * Return the following UID if using API specified in this file without + * further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, + * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. 
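[Editor's note] With struct optee_msg_param, struct optee_msg_arg and OPTEE_MSG_GET_ARG_SIZE() in place, a call argument is a fixed header followed by num_params parameter slots. A minimal sketch of laying one out for a single value parameter (illustrative only: the driver itself allocates these from the shared-memory pool with tee_shm_alloc(), not with kzalloc(), and OPTEE_MSG_CMD_INVOKE_COMMAND is defined further down in this header):

#include <linux/slab.h>
#include "optee_msg.h"

static struct optee_msg_arg *example_build_invoke_arg(u32 session, u32 func,
						      u64 value)
{
	struct optee_msg_arg *arg;

	arg = kzalloc(OPTEE_MSG_GET_ARG_SIZE(1), GFP_KERNEL);
	if (!arg)
		return NULL;

	arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	arg->func = func;		/* Trusted Application command id */
	arg->session = session;
	arg->num_params = 1;
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = value;

	return arg;
}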
+ */ +#define OPTEE_MSG_UID_0 0x384fb3e0 +#define OPTEE_MSG_UID_1 0xe7f811e3 +#define OPTEE_MSG_UID_2 0xaf630002 +#define OPTEE_MSG_UID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 + +/* + * Returns 2.0 if using API specified in this file without further + * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR + * and OPTEE_MSG_REVISION_MINOR + */ +#define OPTEE_MSG_REVISION_MAJOR 2 +#define OPTEE_MSG_REVISION_MINOR 0 +#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in 4 32-bit words in the same way as + * OPTEE_MSG_FUNCID_CALLS_UID described above. + */ +#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 +#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 +#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 +#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in 2 32-bit words in the same way as + * OPTEE_MSG_CALLS_REVISION described above. + */ +#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 + +/* + * Do a secure call with struct optee_msg_arg as argument + * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd + * + * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. + * The first two parameters are tagged as meta, holding two value + * parameters to pass the following information: + * param[0].u.value.a-b uuid of Trusted Application + * param[1].u.value.a-b uuid of Client + * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* + * + * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened + * session to a Trusted Application. struct optee_msg_arg::func is Trusted + * Application function, specific to the Trusted Application. + * + * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to + * Trusted Application. + * + * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. + * + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The + * information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + * [| OPTEE_MSG_ATTR_FRAGMENT] + * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [in] param[0].u.tmem.size size (of first fragment) + * [in] param[0].u.tmem.shm_ref holds shared memory reference + * ... + * The shared memory can optionally be fragmented, temp memrefs can follow + * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set. + * + * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared + * memory reference. 
The information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + * [in] param[0].u.rmem.shm_ref holds shared memory reference + * [in] param[0].u.rmem.offs 0 + * [in] param[0].u.rmem.size 0 + */ +#define OPTEE_MSG_CMD_OPEN_SESSION 0 +#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 +#define OPTEE_MSG_CMD_CLOSE_SESSION 2 +#define OPTEE_MSG_CMD_CANCEL 3 +#define OPTEE_MSG_CMD_REGISTER_SHM 4 +#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 + +/***************************************************************************** + * Part 3 - Requests from secure world, RPC + *****************************************************************************/ + +/* + * All RPC is done with a struct optee_msg_arg as bearer of information, + * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below + * + * RPC communication with tee-supplicant is reversed compared to normal + * client communication desribed above. The supplicant receives requests + * and sends responses. + */ + +/* + * Load a TA into memory, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_LOAD_TA 0 + +/* + * Reserved + */ +#define OPTEE_MSG_RPC_CMD_RPMB 1 + +/* + * File system access, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_FS 2 + +/* + * Get time + * + * Returns number of seconds and nano seconds since the Epoch, + * 1970-01-01 00:00:00 +0000 (UTC). + * + * [out] param[0].u.value.a Number of seconds + * [out] param[0].u.value.b Number of nano seconds. + */ +#define OPTEE_MSG_RPC_CMD_GET_TIME 3 + +/* + * Wait queue primitive, helper for secure world to implement a wait queue. + * + * If secure world need to wait for a secure world mutex it issues a sleep + * request instead of spinning in secure world. Conversely is a wakeup + * request issued when a secure world mutex with a thread waiting thread is + * unlocked. + * + * Waiting on a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP + * [in] param[0].u.value.b wait key + * + * Waking up a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP + * [in] param[0].u.value.b wakeup key + */ +#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] param[0].value .a number of milliseconds to suspend + */ +#define OPTEE_MSG_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * Shared memory can optionally be fragmented, to support that additional + * spare param entries are allocated to make room for eventual fragments. + * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when + * unused. All returned temp memrefs except the last should have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* below + * [in] param[0].u.value.b requested size + * [in] param[0].u.value.c required alignment + * + * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [out] param[0].u.tmem.size size (of first fragment) + * [out] param[0].u.tmem.shm_ref shared memory reference + * ... 
+ * [out] param[n].u.tmem.buf_ptr physical address + * [out] param[n].u.tmem.size size + * [out] param[n].u.tmem.shm_ref shared memory reference (same value + * as in param[n-1].u.tmem.shm_ref) + */ +#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 + +/* + * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* above + * [in] param[0].u.value.b value of shared memory reference + * returned in param[0].u.tmem.shm_ref + * above + */ +#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 + +#endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h new file mode 100644 index 000000000000..c374cd594314 --- /dev/null +++ b/drivers/tee/optee/optee_private.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef OPTEE_PRIVATE_H +#define OPTEE_PRIVATE_H + +#include <linux/arm-smccc.h> +#include <linux/semaphore.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include "optee_msg.h" + +#define OPTEE_MAX_ARG_SIZE 1024 + +/* Some Global Platform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E +#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C + +#define TEEC_ORIGIN_COMMS 0x00000002 + +typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); + +struct optee_call_queue { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head waiters; +}; + +struct optee_wait_queue { + /* Serializes access to this struct */ + struct mutex mu; + struct list_head db; +}; + +/** + * struct optee_supp - supplicant synchronization struct + * @ctx the context of current connected supplicant. 
+ * if !NULL the supplicant device is available for use, + * else busy + * @ctx_mutex: held while accessing @ctx + * @func: supplicant function id to call + * @ret: call return value + * @num_params: number of elements in @param + * @param: parameters for @func + * @req_posted: if true, a request has been posted to the supplicant + * @supp_next_send: if true, next step is for supplicant to send response + * @thrd_mutex: held by the thread doing a request to supplicant + * @supp_mutex: held by supplicant while operating on this struct + * @data_to_supp: supplicant is waiting on this for next request + * @data_from_supp: requesting thread is waiting on this to get the result + */ +struct optee_supp { + struct tee_context *ctx; + /* Serializes access of ctx */ + struct mutex ctx_mutex; + + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + bool req_posted; + bool supp_next_send; + /* Serializes access to this struct for requesting thread */ + struct mutex thrd_mutex; + /* Serializes access to this struct for supplicant threads */ + struct mutex supp_mutex; + struct completion data_to_supp; + struct completion data_from_supp; +}; + +/** + * struct optee - main service struct + * @supp_teedev: supplicant device + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn + * @wait_queue: queue of threads from secure world waiting for a + * secure world sync object + * @supp: supplicant synchronization struct for RPC to supplicant + * @pool: shared memory pool + * @memremaped_shm virtual address of memory in shared memory pool + */ +struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; +}; + +struct optee_session { + struct list_head list_node; + u32 session_id; +}; + +struct optee_context_data { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head sess_list; +}; + +struct optee_rpc_param { + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; +}; + +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param); + +void optee_wait_queue_init(struct optee_wait_queue *wq); +void optee_wait_queue_exit(struct optee_wait_queue *wq); + +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param); + +int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); +int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); +void optee_supp_init(struct optee_supp *supp); +void optee_supp_uninit(struct optee_supp *supp); + +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +int optee_close_session(struct tee_context *ctx, u32 session); +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +void optee_enable_shm_cache(struct optee *optee); +void optee_disable_shm_cache(struct optee *optee); + +int 
optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params); +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params); + +/* + * Small helpers + */ + +static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1) +{ + return (void *)(unsigned long)(((u64)reg0 << 32) | reg1); +} + +static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val) +{ + *reg0 = val >> 32; + *reg1 = val; +} + +#endif /*OPTEE_PRIVATE_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h new file mode 100644 index 000000000000..13b7c98cdf25 --- /dev/null +++ b/drivers/tee/optee/optee_smc.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +#include <linux/arm-smccc.h> +#include <linux/bitops.h> + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(OPTEE_SMC_FAST_CALL, SMCCC_SMC_32, \ + SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. 
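Because the SMC interface below passes 64-bit physical addresses and shared-memory cookies split across two 32-bit registers, the reg_pair_to_ptr()/reg_pair_from_64() helpers in optee_private.h above do the splitting and joining. A minimal round-trip sketch (illustrative only; the example_* name is an assumption):

```c
/* Illustrative round-trip of the reg_pair helpers; not part of the patch. */
static void example_reg_pair_roundtrip(struct tee_shm *shm)
{
	u32 hi, lo;

	/* Split the cookie for an a1/a2 (or a4/a5) register pair... */
	reg_pair_from_64(&hi, &lo, (unsigned long)shm);

	/* ...and reassemble it when the pair comes back from secure world. */
	WARN_ON(reg_pair_to_ptr(hi, lo) != (void *)shm);
}
```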
+ */ + +/* + * Function specified by SMC Calling convention + * + * Return one of the following UIDs if using API specified in this file + * without further extentions: + * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 + * see also OPTEE_SMC_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extentions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +struct optee_smc_calls_revision_result { + unsigned long major; + unsigned long minor; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION +#define OPTEE_SMC_CALL_GET_OS_REVISION \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) + +/* + * Call with struct optee_msg_arg as argument + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 Return value, OPTEE_SMC_RETURN_* + * a1-3 Not used + * a4-7 Preserved + * + * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage: + * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT + * a1-3 Preserved + * a4-7 Preserved + * + * RPC return register usage: + * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val) + * a1-2 RPC parameters + * a3-7 Resume information, must be preserved + * + * Possible return values: + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Call completed, result updated in + * the previously supplied struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded, + * try again later. + * OPTEE_SMC_RETURN_EBADADDR Bad physcial pointer to struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg + * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal + * world. 
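To make the register usage above concrete, here is a simplified sketch of how a caller drives OPTEE_SMC_CALL_WITH_ARG and serves RPCs until the call completes. It is only a sketch: the driver's real loop, optee_do_call_with_arg(), additionally serializes callers through the call queue when OPTEE_SMC_RETURN_ETHREAD_LIMIT is returned, and example_call_with_arg() is a hypothetical name.

```c
/* Simplified sketch; not part of the patch. */
static u32 example_call_with_arg(struct optee *optee, struct tee_context *ctx,
				 phys_addr_t parg)
{
	struct optee_rpc_param param = { .a0 = OPTEE_SMC_CALL_WITH_ARG };
	struct arm_smccc_res res;

	/* a1/a2 carry the 64-bit physical address of struct optee_msg_arg */
	reg_pair_from_64(&param.a1, &param.a2, parg);

	while (true) {
		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7, &res);

		if (!OPTEE_SMC_RETURN_IS_RPC(res.a0))
			return res.a0;	/* OPTEE_SMC_RETURN_OK or an error */

		/*
		 * Secure world suspended the call with an RPC request:
		 * serve it, then resume with OPTEE_SMC_CALL_RETURN_FROM_RPC
		 * (optee_handle_rpc() updates a0 accordingly).
		 */
		param.a0 = res.a0;
		param.a1 = res.a1;
		param.a2 = res.a2;
		param.a3 = res.a3;
		optee_handle_rpc(ctx, &param);
	}
}
```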
+ */ +#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG +#define OPTEE_SMC_CALL_WITH_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) + +/* + * Get Shared Memory Config + * + * Returns the Secure/Non-secure shared memory config. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of SHM + * a2 Size of of SHM + * a3 Cache settings of memory, as defined by the + * OPTEE_SMC_SHM_* values above + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7 +#define OPTEE_SMC_GET_SHM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG) + +struct optee_smc_get_shm_config_result { + unsigned long status; + unsigned long start; + unsigned long size; + unsigned long settings; +}; + +/* + * Exchanges capabilities between normal world and secure world + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES + * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_* + * a2-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + * + * Error return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + */ +/* Normal world works as a uniprocessor system */ +#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0) +/* Secure world has reserved shared memory for normal world to use */ +#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) +/* Secure world can communicate via previously unregistered shared memory */ +#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) +#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 +#define OPTEE_SMC_EXCHANGE_CAPABILITIES \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) + +struct optee_smc_exchange_capabilities_result { + unsigned long status; + unsigned long capabilities; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Disable and empties cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns one shared memory reference to free. To disable the + * cache and free all cached objects this function has to be called until + * it returns OPTEE_SMC_RETURN_ENOTAVAIL. 
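The drain loop implied by the paragraph above looks roughly as follows. This is a sketch only: the driver implements it as optee_disable_shm_cache(), and the a1/a2 cookie layout is the one documented in the register usage that follows.

```c
/* Sketch of draining the shared-memory object cache; not part of the patch. */
static void example_drain_shm_cache(struct optee *optee)
{
	struct arm_smccc_res res;

	do {
		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0,
				 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK) {
			/* a1/a2 hold the 64-bit cookie of a cached object */
			struct tee_shm *shm = reg_pair_to_ptr(res.a1, res.a2);

			tee_shm_free(shm);
		}
	} while (res.a0 == OPTEE_SMC_RETURN_OK);
}
```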
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Upper 32bit of a 64bit Shared memory cookie + * a2 Lower 32bit of a 64bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10 +#define OPTEE_SMC_DISABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE) + +struct optee_smc_disable_shm_cache_result { + unsigned long status; + unsigned long shm_upper32; + unsigned long shm_lower32; + unsigned long reserved0; +}; + +/* + * Enable cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If + * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11 +#define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + +/* + * Resume from RPC (for example after processing an IRQ) + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC + * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned + * OPTEE_SMC_RETURN_RPC in a0 + * + * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above. + * + * Possible return values + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Original call completed, result + * updated in the previously supplied. + * struct optee_msg_arg + * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal + * world. + * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume + * information was corrupt. + */ +#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3 +#define OPTEE_SMC_CALL_RETURN_FROM_RPC \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC) + +#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF + +#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \ + ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK) + +#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX) + +/* + * Allocate memory for RPC parameter passing. The memory is used to hold a + * struct optee_msg_arg. + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. 
+ * a2 Lower 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver an IRQ in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_IRQ + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_IRQ 4 +#define OPTEE_SMC_RETURN_RPC_IRQ \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_CMD 5 +#define OPTEE_SMC_RETURN_RPC_CMD \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD) + +/* Returned in a0 */ +#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF + +/* Returned in a0 only from Trusted OS functions */ +#define OPTEE_SMC_RETURN_OK 0x0 +#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1 +#define OPTEE_SMC_RETURN_EBUSY 0x2 +#define OPTEE_SMC_RETURN_ERESUME 0x3 +#define OPTEE_SMC_RETURN_EBADADDR 0x4 +#define OPTEE_SMC_RETURN_EBADCMD 0x5 +#define OPTEE_SMC_RETURN_ENOMEM 0x6 +#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7 +#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret)) + +static inline bool __optee_smc_return_is_rpc(u32 ret) +{ + return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION && + (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == + OPTEE_SMC_RETURN_RPC_PREFIX; +} + +#endif /* OPTEE_SMC_H */ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c new file mode 100644 index 000000000000..8814eca06021 --- /dev/null +++ b/drivers/tee/optee/rpc.c @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct wq_entry { + struct list_head link; + struct completion c; + u32 key; +}; + +void optee_wait_queue_init(struct optee_wait_queue *priv) +{ + mutex_init(&priv->mu); + INIT_LIST_HEAD(&priv->db); +} + +void optee_wait_queue_exit(struct optee_wait_queue *priv) +{ + mutex_destroy(&priv->mu); +} + +static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) +{ + struct timespec64 ts; + + if (arg->num_params != 1) + goto bad; + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) + goto bad; + + getnstimeofday64(&ts); + arg->params[0].u.value.a = ts.tv_sec; + arg->params[0].u.value.b = ts.tv_nsec; + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w; + + mutex_lock(&wq->mu); + + list_for_each_entry(w, &wq->db, link) + if (w->key == key) + goto out; + + w = kmalloc(sizeof(*w), GFP_KERNEL); + if (w) { + init_completion(&w->c); + w->key = key; + list_add_tail(&w->link, &wq->db); + } +out: + mutex_unlock(&wq->mu); + return w; +} + +static void wq_sleep(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) { + wait_for_completion(&w->c); + mutex_lock(&wq->mu); + list_del(&w->link); + mutex_unlock(&wq->mu); + kfree(w); + } +} + +static void wq_wakeup(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) + complete(&w->c); +} + +static void handle_rpc_func_cmd_wq(struct optee *optee, + struct optee_msg_arg *arg) +{ + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + 
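+	/*
+	 * value.a selects the operation (OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP or
+	 * OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP) and value.b is the key
+	 * identifying the wait queue entry, see optee_msg.h.
+	 */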
switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; + case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) +{ + u32 msec_to_wait; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + msec_to_wait = arg->params[0].u.value.a; + + /* set task's state to interruptible sleep */ + set_current_state(TASK_INTERRUPTIBLE); + + /* take a nap */ + msleep(msec_to_wait); + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_supp_cmd(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_param *params; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + params = kmalloc_array(arg->num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (optee_from_msg_param(params, arg->num_params, arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + + if (optee_to_msg_param(arg->params, arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +out: + kfree(params); +} + +static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) +{ + u32 ret; + struct tee_param param; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + + ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + + mutex_lock(&optee->supp.ctx_mutex); + /* Increases count as secure world doesn't have a reference */ + shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); + mutex_unlock(&optee->supp.ctx_mutex); + return shm; +} + +static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + phys_addr_t pa; + struct tee_shm *shm; + size_t sz; + size_t n; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + for (n = 1; n < arg->num_params; n++) { + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + } + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + if (IS_ERR(shm)) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (tee_shm_get_pa(shm, 0, &pa)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto bad; + } + + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + arg->params[0].u.tmem.buf_ptr = pa; + arg->params[0].u.tmem.size = sz; + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + arg->ret = TEEC_SUCCESS; + return; +bad: + tee_shm_free(shm); +} + +static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_param 
param; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + + /* + * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure + * world has released its reference. + * + * It's better to do this before sending the request to supplicant + * as we'd like to let the process doing the initial allocation to + * do release the last reference too in order to avoid stacking + * many pending fput() on the client process. This could otherwise + * happen if secure world does many allocate and free in a single + * invoke. + */ + tee_shm_put(shm); + + optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); +} + +static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_shm *shm; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->num_params != 1 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + arg->ret = TEEC_SUCCESS; +} + +static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + struct tee_shm *shm) +{ + struct optee_msg_arg *arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); + return; + } + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; + case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; + case OPTEE_MSG_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + handle_rpc_func_cmd_shm_alloc(ctx, arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; + default: + handle_rpc_supp_cmd(ctx, arg); + } +} + +/** + * optee_handle_rpc() - handle RPC from secure world + * @ctx: context doing the RPC + * @param: value of registers for the RPC + * + * Result of RPC is written back into @param. + */ +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + phys_addr_t pa; + + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { + case OPTEE_SMC_RPC_FUNC_ALLOC: + shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { + reg_pair_from_64(¶m->a1, ¶m->a2, pa); + reg_pair_from_64(¶m->a4, ¶m->a5, + (unsigned long)shm); + } else { + param->a1 = 0; + param->a2 = 0; + param->a4 = 0; + param->a5 = 0; + } + break; + case OPTEE_SMC_RPC_FUNC_FREE: + shm = reg_pair_to_ptr(param->a1, param->a2); + tee_shm_free(shm); + break; + case OPTEE_SMC_RPC_FUNC_IRQ: + /* + * An IRQ was raised while secure world was executing, + * since all IRQs are handled in Linux a dummy RPC is + * performed to let Linux take the IRQ through the normal + * vector. 
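+		 * Returning to normal world here is enough for Linux to
+		 * service the interrupt; the call is then resumed via
+		 * OPTEE_SMC_CALL_RETURN_FROM_RPC set at the end of this
+		 * function.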
+ */ + break; + case OPTEE_SMC_RPC_FUNC_CMD: + shm = reg_pair_to_ptr(param->a1, param->a2); + handle_rpc_func_cmd(ctx, optee, shm); + break; + default: + pr_warn("Unknown RPC func 0x%x\n", + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); + break; + } + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; +} diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c new file mode 100644 index 000000000000..b4ea0678a436 --- /dev/null +++ b/drivers/tee/optee/supp.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include "optee_private.h" + +void optee_supp_init(struct optee_supp *supp) +{ + memset(supp, 0, sizeof(*supp)); + mutex_init(&supp->ctx_mutex); + mutex_init(&supp->thrd_mutex); + mutex_init(&supp->supp_mutex); + init_completion(&supp->data_to_supp); + init_completion(&supp->data_from_supp); +} + +void optee_supp_uninit(struct optee_supp *supp) +{ + mutex_destroy(&supp->ctx_mutex); + mutex_destroy(&supp->thrd_mutex); + mutex_destroy(&supp->supp_mutex); +} + +/** + * optee_supp_thrd_req() - request service from supplicant + * @ctx: context doing the request + * @func: function requested + * @num_params: number of elements in @param array + * @param: parameters for function + * + * Returns result of operation to be passed to secure world + */ +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param) +{ + bool interruptable; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_supp *supp = &optee->supp; + u32 ret; + + /* + * Other threads blocks here until we've copied our answer from + * supplicant. + */ + while (mutex_lock_interruptible(&supp->thrd_mutex)) { + /* See comment below on when the RPC can be interrupted. */ + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + return TEEC_ERROR_COMMUNICATION; + } + + /* + * We have exclusive access now since the supplicant at this + * point is either doing a + * wait_for_completion_interruptible(&supp->data_to_supp) or is in + * userspace still about to do the ioctl() to enter + * optee_supp_recv() below. + */ + + supp->func = func; + supp->num_params = num_params; + supp->param = param; + supp->req_posted = true; + + /* Let supplicant get the data */ + complete(&supp->data_to_supp); + + /* + * Wait for supplicant to process and return result, once we've + * returned from wait_for_completion(data_from_supp) we have + * exclusive access again. + */ + while (wait_for_completion_interruptible(&supp->data_from_supp)) { + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + if (interruptable) { + /* + * There's no supplicant available and since the + * supp->ctx_mutex currently is held none can + * become available until the mutex released + * again. + * + * Interrupting an RPC to supplicant is only + * allowed as a way of slightly improving the user + * experience in case the supplicant hasn't been + * started yet. 
During normal operation the supplicant + * will serve all requests in a timely manner and + * interrupting then wouldn't make sense. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + init_completion(&supp->data_to_supp); + } + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + break; + } + + ret = supp->ret; + supp->param = NULL; + supp->req_posted = false; + + /* We're done, let someone else talk to the supplicant now. */ + mutex_unlock(&supp->thrd_mutex); + + return ret; +} + +/** + * optee_supp_recv() - receive request for supplicant + * @ctx: context receiving the request + * @func: requested function in supplicant + * @num_params: number of elements allocated in @param, updated with number + * used elements + * @param: space for parameters for @func + * + * Returns 0 on success or <0 on failure + */ +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + int rc; + + /* + * In case two threads in one supplicant is calling this function + * simultaneously we need to protect the data with a mutex which + * we'll release before returning. + */ + mutex_lock(&supp->supp_mutex); + + if (supp->supp_next_send) { + /* + * optee_supp_recv() has been called again without + * a optee_supp_send() in between. Supplicant has + * probably been restarted before it was able to + * write back last result. Abort last request and + * wait for a new. + */ + if (supp->req_posted) { + supp->ret = TEEC_ERROR_COMMUNICATION; + supp->supp_next_send = false; + complete(&supp->data_from_supp); + } + } + + /* + * This is where supplicant will be hanging most of the + * time, let's make this interruptable so we can easily + * restart supplicant if needed. + */ + if (wait_for_completion_interruptible(&supp->data_to_supp)) { + rc = -ERESTARTSYS; + goto out; + } + + /* We have exlusive access to the data */ + + if (*num_params < supp->num_params) { + /* + * Not enough room for parameters, tell supplicant + * it failed and abort last request. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + rc = -EINVAL; + complete(&supp->data_from_supp); + goto out; + } + + *func = supp->func; + *num_params = supp->num_params; + memcpy(param, supp->param, + sizeof(struct tee_param) * supp->num_params); + + /* Allow optee_supp_send() below to do its work */ + supp->supp_next_send = true; + + rc = 0; +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} + +/** + * optee_supp_send() - send result of request from supplicant + * @ctx: context sending result + * @ret: return value of request + * @num_params: number of parameters returned + * @param: returned parameters + * + * Returns 0 on success or <0 on failure. + */ +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + size_t n; + int rc = 0; + + /* + * We still have exclusive access to the data since that's how we + * left it when returning from optee_supp_read(). + */ + + /* See comment on mutex in optee_supp_read() above */ + mutex_lock(&supp->supp_mutex); + + if (!supp->supp_next_send) { + /* + * Something strange is going on, supplicant shouldn't + * enter optee_supp_send() in this state + */ + rc = -ENOENT; + goto out; + } + + if (num_params != supp->num_params) { + /* + * Something is wrong, let supplicant restart. 
Next call to + * optee_supp_recv() will give an error to the requesting + * thread and release it. + */ + rc = -EINVAL; + goto out; + } + + /* Update out and in/out parameters */ + for (n = 0; n < num_params; n++) { + struct tee_param *p = supp->param + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + p->u.value.a = param[n].u.value.a; + p->u.value.b = param[n].u.value.b; + p->u.value.c = param[n].u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + p->u.memref.size = param[n].u.memref.size; + break; + default: + break; + } + } + supp->ret = ret; + + /* Allow optee_supp_recv() above to do its work */ + supp->supp_next_send = false; + + /* Let the requesting thread continue */ + complete(&supp->data_from_supp); +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c new file mode 100644 index 000000000000..5c60bf4423e6 --- /dev/null +++ b/drivers/tee/tee_core.c @@ -0,0 +1,893 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/uaccess.h> +#include "tee_private.h" + +#define TEE_NUM_DEVICES 32 + +#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) + +/* + * Unprivileged devices in the lower half range and privileged devices in + * the upper half range. 
+ */ +static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES); +static DEFINE_SPINLOCK(driver_lock); + +static struct class *tee_class; +static dev_t tee_devt; + +static int tee_open(struct inode *inode, struct file *filp) +{ + int rc; + struct tee_device *teedev; + struct tee_context *ctx; + + teedev = container_of(inode->i_cdev, struct tee_device, cdev); + if (!tee_device_get(teedev)) + return -EINVAL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto err; + } + + ctx->teedev = teedev; + INIT_LIST_HEAD(&ctx->list_shm); + filp->private_data = ctx; + rc = teedev->desc->ops->open(ctx); + if (rc) + goto err; + + return 0; +err: + kfree(ctx); + tee_device_put(teedev); + return rc; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + struct tee_context *ctx = filp->private_data; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + + ctx->teedev->desc->ops->release(ctx); + mutex_lock(&ctx->teedev->mutex); + list_for_each_entry(shm, &ctx->list_shm, link) + shm->ctx = NULL; + mutex_unlock(&ctx->teedev->mutex); + kfree(ctx); + tee_device_put(teedev); + return 0; +} + +static int tee_ioctl_version(struct tee_context *ctx, + struct tee_ioctl_version_data __user *uvers) +{ + struct tee_ioctl_version_data vers; + + ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + if (copy_to_user(uvers, &vers, sizeof(vers))) + return -EFAULT; + return 0; +} + +static int tee_ioctl_shm_alloc(struct tee_context *ctx, + struct tee_ioctl_shm_alloc_data __user *udata) +{ + long ret; + struct tee_ioctl_shm_alloc_data data; + struct tee_shm *shm; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + data.id = -1; + + shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + +static int params_from_user(struct tee_context *ctx, struct tee_param *params, + size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + params[n].attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + params[n].u.value.a = ip.a; + params[n].u.value.b = ip.b; + params[n].u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * If we fail to get a pointer to a shared memory + * object (and increase the ref count) from an + * identifier we return an error. All pointers that + * has been added in params have an increased ref + * count. It's the callers responibility to do + * tee_shm_put() on all resolved pointers. 
+ */ + shm = tee_shm_get_from_id(ctx, ip.c); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + params[n].u.memref.shm_offs = ip.a; + params[n].u.memref.size = ip.b; + params[n].u.memref.shm = shm; + break; + default: + /* Unknown attribute */ + return -EINVAL; + } + } + return 0; +} + +static int params_to_user(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param __user *up = uparams + n; + struct tee_param *p = params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + if (put_user(p->u.value.a, &up->a) || + put_user(p->u.value.b, &up->b) || + put_user(p->u.value.c, &up->c)) + return -EFAULT; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (put_user((u64)p->u.memref.size, &up->b)) + return -EFAULT; + default: + break; + } + } + return 0; +} + +static bool param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +static int tee_ioctl_open_session(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_open_session_arg __user *uarg; + struct tee_ioctl_open_session_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + bool have_session = false; + + if (!ctx->teedev->desc->ops->open_session) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_open_session_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params); + if (rc) + goto out; + have_session = true; + + if (put_user(arg.session, &uarg->session) || + put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + /* + * If we've succeeded to open the session but failed to communicate + * it back to user space, close the session again to avoid leakage. 
+ */ + if (rc && have_session && ctx->teedev->desc->ops->close_session) + ctx->teedev->desc->ops->close_session(ctx, arg.session); + + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + + return rc; +} + +static int tee_ioctl_invoke(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_invoke_arg __user *uarg; + struct tee_ioctl_invoke_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + + if (!ctx->teedev->desc->ops->invoke_func) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_invoke_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params); + if (rc) + goto out; + + if (put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + return rc; +} + +static int tee_ioctl_cancel(struct tee_context *ctx, + struct tee_ioctl_cancel_arg __user *uarg) +{ + struct tee_ioctl_cancel_arg arg; + + if (!ctx->teedev->desc->ops->cancel_req) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id, + arg.session); +} + +static int +tee_ioctl_close_session(struct tee_context *ctx, + struct tee_ioctl_close_session_arg __user *uarg) +{ + struct tee_ioctl_close_session_arg arg; + + if (!ctx->teedev->desc->ops->close_session) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->close_session(ctx, arg.session); +} + +static int params_to_supp(struct tee_context *ctx, + struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param ip; + struct tee_param *p = params + n; + + ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + ip.a = p->u.value.a; + ip.b = p->u.value.b; + ip.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + ip.b = p->u.memref.size; + if (!p->u.memref.shm) { + ip.a = 0; + ip.c = (u64)-1; /* invalid shm id */ + break; + } + ip.a = p->u.memref.shm_offs; + ip.c = p->u.memref.shm->id; + break; + default: + ip.a = 0; + ip.b = 0; + ip.c = 0; + break; + } + + 
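+		/* Hand the marshalled parameter back to the supplicant's buffer */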
if (copy_to_user(uparams + n, &ip, sizeof(ip))) + return -EFAULT; + } + + return 0; +} + +static int tee_ioctl_supp_recv(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_recv_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 func; + + if (!ctx->teedev->desc->ops->supp_recv) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); + if (rc) + goto out; + + if (put_user(func, &uarg->func) || + put_user(num_params, &uarg->num_params)) { + rc = -EFAULT; + goto out; + } + + rc = params_to_supp(ctx, uarg->params, num_params, params); +out: + kfree(params); + return rc; +} + +static int params_from_supp(struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + p->attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + /* Only out and in/out values can be updated */ + p->u.value.a = ip.a; + p->u.value.b = ip.b; + p->u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * Only the size of the memref can be updated. + * Since we don't have access to the original + * parameters here, only store the supplied size. + * The driver will copy the updated size into the + * original parameters. 
+ */ + p->u.memref.shm = NULL; + p->u.memref.shm_offs = 0; + p->u.memref.size = ip.b; + break; + default: + memset(&p->u, 0, sizeof(p->u)); + break; + } + } + return 0; +} + +static int tee_ioctl_supp_send(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + long rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_send_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 ret; + + /* Not valid for this driver */ + if (!ctx->teedev->desc->ops->supp_send) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_send_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(ret, &uarg->ret) || + get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = params_from_supp(params, num_params, uarg->params); + if (rc) + goto out; + + rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params); +out: + kfree(params); + return rc; +} + +static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct tee_context *ctx = filp->private_data; + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case TEE_IOC_VERSION: + return tee_ioctl_version(ctx, uarg); + case TEE_IOC_SHM_ALLOC: + return tee_ioctl_shm_alloc(ctx, uarg); + case TEE_IOC_OPEN_SESSION: + return tee_ioctl_open_session(ctx, uarg); + case TEE_IOC_INVOKE: + return tee_ioctl_invoke(ctx, uarg); + case TEE_IOC_CANCEL: + return tee_ioctl_cancel(ctx, uarg); + case TEE_IOC_CLOSE_SESSION: + return tee_ioctl_close_session(ctx, uarg); + case TEE_IOC_SUPPL_RECV: + return tee_ioctl_supp_recv(ctx, uarg); + case TEE_IOC_SUPPL_SEND: + return tee_ioctl_supp_send(ctx, uarg); + default: + return -EINVAL; + } +} + +static const struct file_operations tee_fops = { + .owner = THIS_MODULE, + .open = tee_open, + .release = tee_release, + .unlocked_ioctl = tee_ioctl, + .compat_ioctl = tee_ioctl, +}; + +static void tee_release_device(struct device *dev) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + mutex_destroy(&teedev->mutex); + idr_destroy(&teedev->idr); + kfree(teedev); +} + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). 
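For context, a TEE driver typically pairs tee_device_alloc() with tee_device_register() and tee_device_unregister() roughly as sketched below. This is an illustrative sketch, not part of the patch: the example_* names are made up, struct tee_driver_ops and struct tee_desc come from linux/tee_drv.h, and only the mandatory callbacks checked by tee_device_alloc() are shown.

```c
/* Minimal stub callbacks; a real driver does useful work here. */
static void example_get_version(struct tee_device *teedev,
				struct tee_ioctl_version_data *vers)
{
	memset(vers, 0, sizeof(*vers));
}

static int example_open(struct tee_context *ctx)
{
	return 0;
}

static void example_release(struct tee_context *ctx)
{
}

static const struct tee_driver_ops example_ops = {
	.get_version = example_get_version,
	.open = example_open,
	.release = example_release,
};

static const struct tee_desc example_desc = {
	.name = "example-clnt",
	.ops = &example_ops,
	.owner = THIS_MODULE,
};

static int example_register(struct device *dev, struct tee_shm_pool *pool)
{
	struct tee_device *teedev;
	int rc;

	teedev = tee_device_alloc(&example_desc, dev, pool, NULL);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	rc = tee_device_register(teedev);
	if (rc)
		tee_device_unregister(teedev);
	return rc;
}
```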
+ * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data) +{ + struct tee_device *teedev; + void *ret; + int rc; + int offs = 0; + + if (!teedesc || !teedesc->name || !teedesc->ops || + !teedesc->ops->get_version || !teedesc->ops->open || + !teedesc->ops->release || !pool) + return ERR_PTR(-EINVAL); + + teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); + if (!teedev) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + if (teedesc->flags & TEE_DESC_PRIVILEGED) + offs = TEE_NUM_DEVICES / 2; + + spin_lock(&driver_lock); + teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); + if (teedev->id < TEE_NUM_DEVICES) + set_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + + if (teedev->id >= TEE_NUM_DEVICES) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + snprintf(teedev->name, sizeof(teedev->name), "tee%s%d", + teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "", + teedev->id - offs); + + teedev->dev.class = tee_class; + teedev->dev.release = tee_release_device; + teedev->dev.parent = dev; + + teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id); + + rc = dev_set_name(&teedev->dev, "%s", teedev->name); + if (rc) { + ret = ERR_PTR(rc); + goto err_devt; + } + + cdev_init(&teedev->cdev, &tee_fops); + teedev->cdev.owner = teedesc->owner; + teedev->cdev.kobj.parent = &teedev->dev.kobj; + + dev_set_drvdata(&teedev->dev, driver_data); + device_initialize(&teedev->dev); + + /* 1 as tee_device_unregister() does one final tee_device_put() */ + teedev->num_users = 1; + init_completion(&teedev->c_no_users); + mutex_init(&teedev->mutex); + idr_init(&teedev->idr); + + teedev->desc = teedesc; + teedev->pool = pool; + + return teedev; +err_devt: + unregister_chrdev_region(teedev->dev.devt, 1); +err: + pr_err("could not register %s driver\n", + teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client"); + if (teedev && teedev->id < TEE_NUM_DEVICES) { + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + } + kfree(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_device_alloc); + +static ssize_t implementation_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + struct tee_ioctl_version_data vers; + + teedev->desc->ops->get_version(teedev, &vers); + return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); +} +static DEVICE_ATTR_RO(implementation_id); + +static struct attribute *tee_dev_attrs[] = { + &dev_attr_implementation_id.attr, + NULL +}; + +static const struct attribute_group tee_dev_group = { + .attrs = tee_dev_attrs, +}; + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. 
+ * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev) +{ + int rc; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + dev_err(&teedev->dev, "attempt to register twice\n"); + return -EINVAL; + } + + rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1); + if (rc) { + dev_err(&teedev->dev, + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + return rc; + } + + rc = device_add(&teedev->dev); + if (rc) { + dev_err(&teedev->dev, + "unable to device_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + goto err_device_add; + } + + rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group); + if (rc) { + dev_err(&teedev->dev, + "failed to create sysfs attributes, err=%d\n", rc); + goto err_sysfs_create_group; + } + + teedev->flags |= TEE_DEVICE_FLAG_REGISTERED; + return 0; + +err_sysfs_create_group: + device_del(&teedev->dev); +err_device_add: + cdev_del(&teedev->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_device_register); + +void tee_device_put(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + /* Shouldn't put in this state */ + if (!WARN_ON(!teedev->desc)) { + teedev->num_users--; + if (!teedev->num_users) { + teedev->desc = NULL; + complete(&teedev->c_no_users); + } + } + mutex_unlock(&teedev->mutex); +} + +bool tee_device_get(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + if (!teedev->desc) { + mutex_unlock(&teedev->mutex); + return false; + } + teedev->num_users++; + mutex_unlock(&teedev->mutex); + return true; +} + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev) +{ + if (!teedev) + return; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group); + cdev_del(&teedev->cdev); + device_del(&teedev->dev); + } + + tee_device_put(teedev); + wait_for_completion(&teedev->c_no_users); + + /* + * No need to take a mutex any longer now since teedev->desc was + * set to NULL before teedev->c_no_users was completed. + */ + + teedev->pool = NULL; + + put_device(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_device_unregister); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @teedev: Device containing the driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). 
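Putting the allocation and registration halves together, a back-end driver's probe/remove path would use these helpers roughly as sketched below. This assumes the my_tee_desc descriptor from the earlier sketch and a shared memory pool created elsewhere (for instance with tee_shm_pool_alloc_res_mem()); error handling is trimmed to the essentials. Note that tee_device_unregister() is safe to call even when tee_device_register() failed, exactly as the kernel-doc above states.

#include <linux/platform_device.h>
#include <linux/tee_drv.h>

static struct tee_device *my_teedev;
static struct tee_shm_pool *my_pool;	/* assumed to be set up by the driver */

static int my_tee_probe(struct platform_device *pdev)
{
	int rc;

	my_teedev = tee_device_alloc(&my_tee_desc, &pdev->dev, my_pool, NULL);
	if (IS_ERR(my_teedev))
		return PTR_ERR(my_teedev);

	rc = tee_device_register(my_teedev);
	if (rc) {
		/* Releases the id and the embedded struct device */
		tee_device_unregister(my_teedev);
		return rc;
	}
	return 0;
}

static int my_tee_remove(struct platform_device *pdev)
{
	/* Blocks until the last user has dropped its reference */
	tee_device_unregister(my_teedev);
	return 0;
}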
+ */ +void *tee_get_drvdata(struct tee_device *teedev) +{ + return dev_get_drvdata(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_get_drvdata); + +static int __init tee_init(void) +{ + int rc; + + tee_class = class_create(THIS_MODULE, "tee"); + if (IS_ERR(tee_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(tee_class); + } + + rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee"); + if (rc) { + pr_err("failed to allocate char dev region\n"); + class_destroy(tee_class); + tee_class = NULL; + } + + return rc; +} + +static void __exit tee_exit(void) +{ + class_destroy(tee_class); + tee_class = NULL; + unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); +} + +subsys_initcall(tee_init); +module_exit(tee_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("TEE Driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h new file mode 100644 index 000000000000..21cb6be8bce9 --- /dev/null +++ b/drivers/tee/tee_private.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef TEE_PRIVATE_H +#define TEE_PRIVATE_H + +#include <linux/cdev.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/types.h> + +struct tee_device; + +/** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +struct tee_shm_pool_mgr; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool - shared memory pool + * @private_mgr: pool manager for shared memory only between kernel + * and secure world + * @dma_buf_mgr: pool manager for shared memory exported to user space + * @destroy: called when destroying the pool + * @private_data: private data for the pool + */ +struct tee_shm_pool { + struct tee_shm_pool_mgr private_mgr; + struct tee_shm_pool_mgr dma_buf_mgr; + void (*destroy)(struct 
tee_shm_pool *pool);
+	void *private_data;
+};
+
+#define TEE_DEVICE_FLAG_REGISTERED	0x1
+#define TEE_MAX_DEV_NAME_LEN		32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name:	name of device
+ * @desc:	description of device
+ * @id:		unique id of device
+ * @flags:	represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev:	embedded basic device structure
+ * @cdev:	embedded cdev
+ * @num_users:	number of active users of this device
+ * @c_no_users:	completion used when unregistering the device
+ * @mutex:	mutex protecting @num_users and @idr
+ * @idr:	register of shared memory objects allocated on this device
+ * @pool:	shared memory pool
+ */
+struct tee_device {
+	char name[TEE_MAX_DEV_NAME_LEN];
+	const struct tee_desc *desc;
+	int id;
+	unsigned int flags;
+
+	struct device dev;
+	struct cdev cdev;
+
+	size_t num_users;
+	struct completion c_no_users;
+	struct mutex mutex;	/* protects num_users and idr */
+
+	struct idr idr;
+	struct tee_shm_pool *pool;
+};
+
+int tee_shm_init(void);
+
+int tee_shm_get_fd(struct tee_shm *shm);
+
+bool tee_device_get(struct tee_device *teedev);
+void tee_device_put(struct tee_device *teedev);
+
+#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
new file mode 100644
index 000000000000..d356d7f025eb
--- /dev/null
+++ b/drivers/tee/tee_shm.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
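The pool manager ops declared in tee_private.h are only visible inside the TEE core at this point, but the contract is small: alloc() must fill in @kaddr, @paddr and @size of the struct tee_shm, and free() must release whatever alloc() handed out. Purely as an illustration of that contract (not an interface offered to other drivers here), a hypothetical manager backed by alloc_pages_exact() could look like this.

#include <linux/io.h>
#include <linux/mm.h>
#include "tee_private.h"

static int pages_pool_alloc(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm, size_t size)
{
	size_t s = PAGE_ALIGN(size);
	void *va = alloc_pages_exact(s, GFP_KERNEL | __GFP_ZERO);

	if (!va)
		return -ENOMEM;

	/* The core only needs these three fields filled in */
	shm->kaddr = va;
	shm->paddr = virt_to_phys(va);
	shm->size = s;
	return 0;
}

static void pages_pool_free(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm)
{
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
}

static const struct tee_shm_pool_mgr_ops pages_pool_ops = {
	.alloc = pages_pool_alloc,
	.free = pages_pool_free,
};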
+ * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/fdtable.h> +#include <linux/idr.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static void tee_shm_release(struct tee_shm *shm) +{ + struct tee_device *teedev = shm->teedev; + struct tee_shm_pool_mgr *poolm; + + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + if (shm->ctx) + list_del(&shm->link); + mutex_unlock(&teedev->mutex); + + if (shm->flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + poolm->ops->free(poolm, shm); + kfree(shm); + + tee_device_put(teedev); +} + +static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment + *attach, enum dma_data_direction dir) +{ + return NULL; +} + +static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ +} + +static void tee_shm_op_release(struct dma_buf *dmabuf) +{ + struct tee_shm *shm = dmabuf->priv; + + tee_shm_release(shm); +} + +static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct tee_shm *shm = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, + size, vma->vm_page_prot); +} + +static struct dma_buf_ops tee_shm_dma_buf_ops = { + .map_dma_buf = tee_shm_op_map_dma_buf, + .unmap_dma_buf = tee_shm_op_unmap_dma_buf, + .release = tee_shm_op_release, + .map_atomic = tee_shm_op_map_atomic, + .map = tee_shm_op_map, + .mmap = tee_shm_op_mmap, +}; + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be + * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and + * associated with a dma-buf handle, else driver private memory. 
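From a client driver's point of view the allocator documented above is used roughly as in the sketch below, assuming a valid struct tee_context obtained via the open() callback. The physical address would then be handed to secure world over whatever transport the specific driver uses; that part is only hinted at here.

#include <linux/string.h>
#include <linux/tee_drv.h>

static int share_buffer_with_tee(struct tee_context *ctx)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	void *va;
	int rc;

	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		goto out;

	memset(va, 0, 4096);	/* fill in a message for secure world here */
	/* ... pass 'pa' to the secure OS using the driver's own transport ... */
out:
	tee_shm_free(shm);
	return rc;
}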
+ */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +{ + struct tee_device *teedev = ctx->teedev; + struct tee_shm_pool_mgr *poolm = NULL; + struct tee_shm *shm; + void *ret; + int rc; + + if (!(flags & TEE_SHM_MAPPED)) { + dev_err(teedev->dev.parent, + "only mapped allocations supported\n"); + return ERR_PTR(-EINVAL); + } + + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) { + dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); + return ERR_PTR(-EINVAL); + } + + if (!tee_device_get(teedev)) + return ERR_PTR(-EINVAL); + + if (!teedev->pool) { + /* teedev has been detached from driver */ + ret = ERR_PTR(-EINVAL); + goto err_dev_put; + } + + shm = kzalloc(sizeof(*shm), GFP_KERNEL); + if (!shm) { + ret = ERR_PTR(-ENOMEM); + goto err_dev_put; + } + + shm->flags = flags; + shm->teedev = teedev; + shm->ctx = ctx; + if (flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + rc = poolm->ops->alloc(poolm, shm, size); + if (rc) { + ret = ERR_PTR(rc); + goto err_kfree; + } + + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; + } + + if (flags & TEE_SHM_DMA_BUF) { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = shm->size; + exp_info.flags = O_RDWR; + exp_info.priv = shm; + + shm->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(shm->dmabuf)) { + ret = ERR_CAST(shm->dmabuf); + goto err_rem; + } + } + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + + return shm; +err_rem: + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); +err_pool_free: + poolm->ops->free(poolm, shm); +err_kfree: + kfree(shm); +err_dev_put: + tee_device_put(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_shm_alloc); + +/** + * tee_shm_get_fd() - Increase reference count and return file descriptor + * @shm: Shared memory handle + * @returns user space file descriptor to shared memory + */ +int tee_shm_get_fd(struct tee_shm *shm) +{ + u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF; + int fd; + + if ((shm->flags & req_flags) != req_flags) + return -EINVAL; + + fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); + if (fd >= 0) + get_dma_buf(shm->dmabuf); + return fd; +} + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm) +{ + /* + * dma_buf_put() decreases the dmabuf reference counter and will + * call tee_shm_release() when the last reference is gone. + * + * In the case of driver private memory we call tee_shm_release + * directly instead as it doesn't have a reference counter. 
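Because TEE_SHM_DMA_BUF allocations are exported as dma-bufs, user space never sees raw physical addresses: TEE_IOC_SHM_ALLOC returns a file descriptor that is simply mmap()ed, while the shared memory id travels back in the argument struct and is what later ioctl parameters refer to. A sketch, assuming the UAPI in <linux/tee.h> and a /dev/tee0 node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
	int fd, shm_fd;
	void *p;

	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0)
		return 1;

	/* The return value is the dma-buf fd, data.id is the shm id */
	shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &data);
	if (shm_fd < 0)
		return 1;

	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 shm_fd, 0);
	if (p == MAP_FAILED)
		return 1;

	printf("shm id %d mapped at %p\n", (int)data.id, p);
	return 0;
}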
+ */
+	if (shm->flags & TEE_SHM_DMA_BUF)
+		dma_buf_put(shm->dmabuf);
+	else
+		tee_shm_release(shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_free);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm:	Shared memory handle
+ * @va:		Virtual address to translate
+ * @pa:		Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
+{
+	/* Check that we're in the range of the shm */
+	if ((char *)va < (char *)shm->kaddr)
+		return -EINVAL;
+	if ((char *)va >= ((char *)shm->kaddr + shm->size))
+		return -EINVAL;
+
+	return tee_shm_get_pa(
+			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
+}
+EXPORT_SYMBOL_GPL(tee_shm_va2pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm:	Shared memory handle
+ * @pa:		Physical address to translate
+ * @va:		Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
+{
+	/* Check that we're in the range of the shm */
+	if (pa < shm->paddr)
+		return -EINVAL;
+	if (pa >= (shm->paddr + shm->size))
+		return -EINVAL;
+
+	if (va) {
+		void *v = tee_shm_get_va(shm, pa - shm->paddr);
+
+		if (IS_ERR(v))
+			return PTR_ERR(v);
+		*va = v;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pa2va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm:	Shared memory handle
+ * @offs:	Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ *	the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
+{
+	if (offs >= shm->size)
+		return ERR_PTR(-EINVAL);
+	return (char *)shm->kaddr + offs;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_va);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm:	Shared memory handle
+ * @offs:	Offset from start of this shared memory
+ * @pa:		Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ *	error code.
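The two translation helpers are symmetric, so a round trip through them lands back on the same kernel address. A small kernel-side sanity-check sketch, with shm coming from tee_shm_alloc() as above:

static int check_translation(struct tee_shm *shm)
{
	void *va = tee_shm_get_va(shm, 0);
	phys_addr_t pa;
	void *va2;
	int rc;

	if (IS_ERR(va))
		return PTR_ERR(va);

	rc = tee_shm_va2pa(shm, va, &pa);	/* kernel va -> physical */
	if (rc)
		return rc;

	rc = tee_shm_pa2va(shm, pa, &va2);	/* physical -> kernel va */
	if (rc)
		return rc;

	return va == va2 ? 0 : -EINVAL;
}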
+ */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa) +{ + if (offs >= shm->size) + return -EINVAL; + if (pa) + *pa = shm->paddr + offs; + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_get_pa); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) +{ + struct tee_device *teedev; + struct tee_shm *shm; + + if (!ctx) + return ERR_PTR(-EINVAL); + + teedev = ctx->teedev; + mutex_lock(&teedev->mutex); + shm = idr_find(&teedev->idr, id); + if (!shm || shm->ctx != ctx) + shm = ERR_PTR(-EINVAL); + else if (shm->flags & TEE_SHM_DMA_BUF) + get_dma_buf(shm->dmabuf); + mutex_unlock(&teedev->mutex); + return shm; +} +EXPORT_SYMBOL_GPL(tee_shm_get_from_id); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} +EXPORT_SYMBOL_GPL(tee_shm_get_id); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm) +{ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); +} +EXPORT_SYMBOL_GPL(tee_shm_put); diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c new file mode 100644 index 000000000000..fb4f8522a526 --- /dev/null +++ b/drivers/tee/tee_shm_pool.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/genalloc.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned long va; + struct gen_pool *genpool = poolm->private_data; + size_t s = roundup(size, 1 << genpool->min_alloc_order); + + va = gen_pool_alloc(genpool, s); + if (!va) + return -ENOMEM; + + memset((void *)va, 0, s); + shm->kaddr = (void *)va; + shm->paddr = gen_pool_virt_to_phys(genpool, va); + shm->size = s; + return 0; +} + +static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + shm->size); + shm->kaddr = NULL; +} + +static const struct tee_shm_pool_mgr_ops pool_ops_generic = { + .alloc = pool_op_gen_alloc, + .free = pool_op_gen_free, +}; + +static void pool_res_mem_destroy(struct tee_shm_pool *pool) +{ + gen_pool_destroy(pool->private_mgr.private_data); + gen_pool_destroy(pool->dma_buf_mgr.private_data); +} + +static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, + struct tee_shm_pool_mem_info *info, + int min_alloc_order) +{ + size_t page_mask = PAGE_SIZE - 1; + struct gen_pool *genpool = NULL; + int rc; + + /* + * Start and end must be page aligned + */ + if ((info->vaddr & page_mask) || (info->paddr & page_mask) || + (info->size & page_mask)) + return -EINVAL; + + genpool = gen_pool_create(min_alloc_order, -1); + if (!genpool) + return -ENOMEM; + + gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); + rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, + -1); + if (rc) { + gen_pool_destroy(genpool); + return rc; + } + + mgr->private_data = genpool; + mgr->ops = &pool_ops_generic; + return 0; +} + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info) +{ + struct tee_shm_pool *pool = NULL; + int ret; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + ret = -ENOMEM; + goto err; + } + + /* + * Create the pool for driver private shared memory + */ + ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, + 3 /* 8 byte aligned */); + if (ret) + goto err; + + /* + * Create the pool for dma_buf shared memory + */ + ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, + PAGE_SHIFT); + if (ret) + goto err; + + pool->destroy = pool_res_mem_destroy; + return pool; +err: + if (ret == -ENOMEM) + pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__); + if (pool && pool->private_mgr.private_data) + gen_pool_destroy(pool->private_mgr.private_data); + kfree(pool); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * There must be no remaining shared memory allocated from this pool when + * this function is called. 
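A back-end driver typically feeds this allocator with a static carveout that is also known to the secure OS. A hedged sketch of that setup follows, assuming a carveout at my_paddr of my_size bytes (a multiple of twice the page size, to satisfy the alignment check above), split evenly between the private and dma-buf managers and mapped with memremap(); the names are illustrative.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/tee_drv.h>

static struct tee_shm_pool *my_create_pool(phys_addr_t my_paddr,
					   size_t my_size)
{
	struct tee_shm_pool_mem_info priv_info, dmabuf_info;
	void *va = memremap(my_paddr, my_size, MEMREMAP_WB);

	if (!va)
		return ERR_PTR(-ENOMEM);

	/* First half: driver private allocations */
	priv_info.vaddr = (unsigned long)va;
	priv_info.paddr = my_paddr;
	priv_info.size = my_size / 2;

	/* Second half: dma-buf backed allocations handed to user space */
	dmabuf_info.vaddr = (unsigned long)va + my_size / 2;
	dmabuf_info.paddr = my_paddr + my_size / 2;
	dmabuf_info.size = my_size / 2;

	return tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
}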
+ */ +void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->destroy(pool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_free); diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 5e4fa9206861..104f09c58163 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -156,8 +156,8 @@ static unsigned int cy_isa_addresses[] = { static long maddr[NR_CARDS]; static int irq[NR_CARDS]; -module_param_array(maddr, long, NULL, 0); -module_param_array(irq, int, NULL, 0); +module_param_hw_array(maddr, long, iomem, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); #endif /* CONFIG_ISA */ diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 4caf0c3b1f99..3b251f4e5df0 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -179,7 +179,7 @@ MODULE_FIRMWARE("c320tunx.cod"); module_param_array(type, uint, NULL, 0); MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); -module_param_array(baseaddr, ulong, NULL, 0); +module_param_hw_array(baseaddr, ulong, ioport, NULL, 0); MODULE_PARM_DESC(baseaddr, "base address"); module_param_array(numports, uint, NULL, 0); MODULE_PARM_DESC(numports, "numports (ignored for C218)"); diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 7b8f383fb090..8bd6fb6d9391 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -183,7 +183,7 @@ static int ttymajor = MXSERMAJOR; MODULE_AUTHOR("Casper Yang"); MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver"); -module_param_array(ioaddr, ulong, NULL, 0); +module_param_hw_array(ioaddr, ulong, ioport, NULL, 0); MODULE_PARM_DESC(ioaddr, "ISA io addresses to look for a moxa board"); module_param(ttymajor, int, 0); MODULE_LICENSE("GPL"); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index d66c1edd9892..b51a877da986 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -250,15 +250,15 @@ static int sReadAiopNumChan(WordIO_t io); MODULE_AUTHOR("Theodore Ts'o"); MODULE_DESCRIPTION("Comtrol RocketPort driver"); -module_param(board1, ulong, 0); +module_param_hw(board1, ulong, ioport, 0); MODULE_PARM_DESC(board1, "I/O port for (ISA) board #1"); -module_param(board2, ulong, 0); +module_param_hw(board2, ulong, ioport, 0); MODULE_PARM_DESC(board2, "I/O port for (ISA) board #2"); -module_param(board3, ulong, 0); +module_param_hw(board3, ulong, ioport, 0); MODULE_PARM_DESC(board3, "I/O port for (ISA) board #3"); -module_param(board4, ulong, 0); +module_param_hw(board4, ulong, ioport, 0); MODULE_PARM_DESC(board4, "I/O port for (ISA) board #4"); -module_param(controller, ulong, 0); +module_param_hw(controller, ulong, ioport, 0); MODULE_PARM_DESC(controller, "I/O port for (ISA) rocketport controller"); module_param(support_low_speed, bool, 0); MODULE_PARM_DESC(support_low_speed, "1 means support 50 baud, 0 means support 460400 baud"); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 48a07e2f617f..1aab3010fbfa 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -1191,7 +1191,7 @@ module_exit(serial8250_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic 8250/16x50 serial driver"); -module_param(share_irqs, uint, 0644); +module_param_hw(share_irqs, uint, other, 0644); MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices (unsafe)"); module_param(nr_uarts, uint, 0644); @@ -1201,7 +1201,7 @@ module_param(skip_txen_test, uint, 0644); MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time"); #ifdef 
CONFIG_SERIAL_8250_RSA -module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); +module_param_hw_array(probe_rsa, ulong, ioport, &probe_rsa_count, 0444); MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); #endif MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index 657eed82eeb3..a2c308f7d637 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -869,9 +869,9 @@ static int txholdbufs[MAX_TOTAL_DEVICES]; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(dma, int, NULL, 0); +module_param_hw_array(io, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); +module_param_hw_array(dma, int, dma, NULL, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); module_param_array(txdmabufs, int, NULL, 0); diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 9ad3c17d6456..445b2c230b56 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_VGASTATE) += vgastate.o obj-$(CONFIG_HDMI) += hdmi.o obj-$(CONFIG_VT) += console/ +obj-$(CONFIG_FB_STI) += console/ obj-$(CONFIG_LOGO) += logo/ obj-y += backlight/ diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 5b71bd905a60..2111d06f8c81 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -6,7 +6,7 @@ menu "Console display driver support" config VGA_CONSOLE bool "VGA text console" if EXPERT || !X86 - depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \ + depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !FRV && \ !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \ !ARM64 && !ARC && !MICROBLAZE && !OPENRISC diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 922e4eaed9c5..5c6696bb56da 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -689,8 +689,6 @@ config FB_STI select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT - select STI_CONSOLE - select VT default y ---help--- STI refers to the HP "Standard Text Interface" which is a set of diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c index fb75b7e5a19a..0c325b4da61d 100644 --- a/drivers/video/fbdev/acornfb.c +++ b/drivers/video/fbdev/acornfb.c @@ -101,7 +101,7 @@ extern unsigned int vram_size; /* set by setup.c */ #ifdef HAS_VIDC20 #include <mach/acornfb.h> -#define MAX_SIZE 2*1024*1024 +#define MAX_SIZE (2*1024*1024) /* VIDC20 has a different set of rules from the VIDC: * hcr : must be multiple of 4 @@ -162,7 +162,7 @@ static void acornfb_set_timing(struct fb_info *info) if (memcmp(¤t_vidc, &vidc, sizeof(vidc))) { current_vidc = vidc; - vidc_writel(VIDC20_CTRL| vidc.control); + vidc_writel(VIDC20_CTRL | vidc.control); vidc_writel(0xd0000000 | vidc.pll_ctl); vidc_writel(0x80000000 | vidc.h_cycle); vidc_writel(0x81000000 | vidc.h_sync_width); @@ -297,7 +297,7 @@ acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, pal.p = 0; vidc_writel(0x10000000); for (i = 0; i < 256; i += 1) { - pal.vidc20.red = current_par.palette[ i & 31].vidc20.red; + pal.vidc20.red = current_par.palette[i & 31].vidc20.red; pal.vidc20.green = current_par.palette[(i >> 1) & 31].vidc20.green; pal.vidc20.blue = current_par.palette[(i >> 2) & 31].vidc20.blue; vidc_writel(pal.p); @@ -1043,8 +1043,7 @@ 
static int acornfb_probe(struct platform_device *dev) base = dma_alloc_wc(current_par.dev, size, &handle, GFP_KERNEL); if (base == NULL) { - printk(KERN_ERR "acornfb: unable to allocate screen " - "memory\n"); + printk(KERN_ERR "acornfb: unable to allocate screen memory\n"); return -ENOMEM; } @@ -1103,8 +1102,7 @@ static int acornfb_probe(struct platform_device *dev) v_sync = h_sync / (fb_info.var.yres + fb_info.var.upper_margin + fb_info.var.lower_margin + fb_info.var.vsync_len); - printk(KERN_INFO "Acornfb: %dkB %cRAM, %s, using %dx%d, " - "%d.%03dkHz, %dHz\n", + printk(KERN_INFO "Acornfb: %dkB %cRAM, %s, using %dx%d, %d.%03dkHz, %dHz\n", fb_info.fix.smem_len / 1024, current_par.using_vram ? 'V' : 'D', VIDC_NAME, fb_info.var.xres, fb_info.var.yres, diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 0fab92c62828..ffc2c33c6cef 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -881,8 +881,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) if (err) return err; - framesize = fb->panel->mode.xres * fb->panel->mode.yres * - fb->panel->bpp / 8; + framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres * + fb->panel->bpp / 8); fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, &dma, GFP_KERNEL); if (!fb->fb.screen_base) diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 1928cb2b5386..7e87d0d61658 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -645,17 +645,17 @@ module_param(nosplash, uint, 0); MODULE_PARM_DESC(nosplash, "Disable doing the splash screen"); module_param(arcfb_enable, uint, 0); MODULE_PARM_DESC(arcfb_enable, "Enable communication with Arc board"); -module_param(dio_addr, ulong, 0); +module_param_hw(dio_addr, ulong, ioport, 0); MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480"); -module_param(cio_addr, ulong, 0); +module_param_hw(cio_addr, ulong, ioport, 0); MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400"); -module_param(c2io_addr, ulong, 0); +module_param_hw(c2io_addr, ulong, ioport, 0); MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408"); module_param(splashval, ulong, 0); MODULE_PARM_DESC(splashval, "Splash pattern: 0xFF is black, 0x00 is green"); module_param(tuhold, ulong, 0); MODULE_PARM_DESC(tuhold, "Time to hold between strobing data to Arc board"); -module_param(irq, uint, 0); +module_param_hw(irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ for the Arc board"); module_init(arcfb_init); diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 218339a4edaa..6b4c7872b375 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -2453,8 +2453,8 @@ static int radeonfb_pci_register(struct pci_dev *pdev, err |= sysfs_create_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr); if (err) - pr_warning("%s() Creating sysfs files failed, continuing\n", - __func__); + pr_warn("%s() Creating sysfs files failed, continuing\n", + __func__); /* save current mode regs before we switch into the new one * so we can restore this upon __exit diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 62c0cf79674f..687ebb053438 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1073,9 +1073,9 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) { int idx = svd[i 
- specs->modedb_len - num]; if (!idx || idx >= ARRAY_SIZE(cea_modes)) { - pr_warning("Reserved SVD code %d\n", idx); + pr_warn("Reserved SVD code %d\n", idx); } else if (!cea_modes[idx].xres) { - pr_warning("Unimplemented SVD code %d\n", idx); + pr_warn("Unimplemented SVD code %d\n", idx); } else { memcpy(&m[i], cea_modes + idx, sizeof(m[i])); pr_debug("Adding SVD #%d: %ux%u@%u\n", idx, diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index 483ab2592d0c..2488baab7c89 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -81,7 +81,7 @@ static u32 voffset; static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor); static int i810fb_init_pci(struct pci_dev *dev, const struct pci_device_id *entry); -static void __exit i810fb_remove_pci(struct pci_dev *dev); +static void i810fb_remove_pci(struct pci_dev *dev); static int i810fb_resume(struct pci_dev *dev); static int i810fb_suspend(struct pci_dev *dev, pm_message_t state); @@ -128,7 +128,7 @@ static struct pci_driver i810fb_driver = { .name = "i810fb", .id_table = i810fb_pci_tbl, .probe = i810fb_init_pci, - .remove = __exit_p(i810fb_remove_pci), + .remove = i810fb_remove_pci, .suspend = i810fb_suspend, .resume = i810fb_resume, }; @@ -2123,7 +2123,7 @@ static void i810fb_release_resource(struct fb_info *info, } -static void __exit i810fb_remove_pci(struct pci_dev *dev) +static void i810fb_remove_pci(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); struct i810fb_par *par = info->par; diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index 1b0faadb3080..c166e0725be5 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -117,6 +117,9 @@ #define IMXFB_LSCR1_DEFAULT 0x00120300 +#define LCDC_LAUSCR 0x80 +#define LAUSCR_AUS_MODE (1<<31) + /* Used fb-mode. Can be set on kernel command line, therefore file-static. */ static const char *fb_mode; @@ -158,6 +161,7 @@ struct imxfb_info { dma_addr_t dbar2; u_int pcr; + u_int lauscr; u_int pwmr; u_int lscr1; u_int dmacr; @@ -422,6 +426,11 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) pcr |= imxfb_mode->pcr & ~(0x3f | (7 << 25)); fbi->pcr = pcr; + /* + * The LCDC AUS Mode Control Register does not exist on imx1. 
+ */ + if (!is_imx1_fb(fbi) && imxfb_mode->aus_mode) + fbi->lauscr = LAUSCR_AUS_MODE; /* * Copy the RGB parameters for this display @@ -638,6 +647,9 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf if (fbi->dmacr) writel(fbi->dmacr, fbi->regs + LCDC_DMACR); + if (fbi->lauscr) + writel(fbi->lauscr, fbi->regs + LCDC_LAUSCR); + return 0; } @@ -734,6 +746,11 @@ static int imxfb_of_read_mode(struct device *dev, struct device_node *np, imxfb_mode->bpp = bpp; imxfb_mode->pcr = pcr; + /* + * fsl,aus-mode is optional + */ + imxfb_mode->aus_mode = of_property_read_bool(np, "fsl,aus-mode"); + return 0; } diff --git a/drivers/video/fbdev/n411.c b/drivers/video/fbdev/n411.c index 053deacad7cc..a3677313396e 100644 --- a/drivers/video/fbdev/n411.c +++ b/drivers/video/fbdev/n411.c @@ -193,11 +193,11 @@ module_exit(n411_exit); module_param(nosplash, uint, 0); MODULE_PARM_DESC(nosplash, "Disable doing the splash screen"); -module_param(dio_addr, ulong, 0); +module_param_hw(dio_addr, ulong, ioport, 0); MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480"); -module_param(cio_addr, ulong, 0); +module_param_hw(cio_addr, ulong, ioport, 0); MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400"); -module_param(c2io_addr, ulong, 0); +module_param_hw(c2io_addr, ulong, ioport, 0); MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408"); module_param(splashval, ulong, 0); MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white"); diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index c81f150589e1..df9e6ebcfad5 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -174,7 +174,7 @@ static void hw_guard_wait(struct mipid_device *md) { unsigned long wait = md->hw_guard_end - jiffies; - if ((long)wait > 0 && wait <= md->hw_guard_wait) { + if ((long)wait > 0 && time_before_eq(wait, md->hw_guard_wait)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 47d7f69ad9ad..48c6500c24e1 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -941,11 +941,13 @@ static int dss_init_features(struct platform_device *pdev) return 0; } +static void dss_uninit_ports(struct platform_device *pdev); + static int dss_init_ports(struct platform_device *pdev) { struct device_node *parent = pdev->dev.of_node; struct device_node *port; - int r; + int r, ret = 0; if (parent == NULL) return 0; @@ -972,17 +974,21 @@ static int dss_init_ports(struct platform_device *pdev) switch (port_type) { case OMAP_DISPLAY_TYPE_DPI: - dpi_init_port(pdev, port); + ret = dpi_init_port(pdev, port); break; case OMAP_DISPLAY_TYPE_SDI: - sdi_init_port(pdev, port); + ret = sdi_init_port(pdev, port); break; default: break; } - } while ((port = omapdss_of_get_next_port(parent, port)) != NULL); + } while (!ret && + (port = omapdss_of_get_next_port(parent, port)) != NULL); - return 0; + if (ret) + dss_uninit_ports(pdev); + + return ret; } static void dss_uninit_ports(struct platform_device *pdev) diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c index ffe2dd482f84..39922f072db4 100644 --- a/drivers/video/fbdev/pmag-aa-fb.c +++ b/drivers/video/fbdev/pmag-aa-fb.c @@ -247,7 +247,7 @@ err_alloc: return err; } -static int __exit pmagaafb_remove(struct device *dev) +static int pmagaafb_remove(struct device 
*dev) { struct tc_dev *tdev = to_tc_dev(dev); struct fb_info *info = dev_get_drvdata(dev); @@ -280,7 +280,7 @@ static struct tc_driver pmagaafb_driver = { .name = "pmagaafb", .bus = &tc_bus_type, .probe = pmagaafb_probe, - .remove = __exit_p(pmagaafb_remove), + .remove = pmagaafb_remove, }, }; diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index df02fb4b7fd1..1fd02f40708e 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -235,7 +235,7 @@ err_alloc: return err; } -static int __exit pmagbafb_remove(struct device *dev) +static int pmagbafb_remove(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); struct fb_info *info = dev_get_drvdata(dev); @@ -270,7 +270,7 @@ static struct tc_driver pmagbafb_driver = { .name = "pmagbafb", .bus = &tc_bus_type, .probe = pmagbafb_probe, - .remove = __exit_p(pmagbafb_remove), + .remove = pmagbafb_remove, }, }; diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c index a7a179a0bb33..46e96c451506 100644 --- a/drivers/video/fbdev/pmagb-b-fb.c +++ b/drivers/video/fbdev/pmagb-b-fb.c @@ -353,7 +353,7 @@ err_alloc: return err; } -static int __exit pmagbbfb_remove(struct device *dev) +static int pmagbbfb_remove(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); struct fb_info *info = dev_get_drvdata(dev); @@ -388,7 +388,7 @@ static struct tc_driver pmagbbfb_driver = { .name = "pmagbbfb", .bus = &tc_bus_type, .probe = pmagbbfb_probe, - .remove = __exit_p(pmagbbfb_remove), + .remove = pmagbbfb_remove, }, }; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index ef73f14d7ba0..b21a89b03fb4 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -645,7 +645,7 @@ static void overlay1fb_disable(struct pxafb_layer *ofb) lcd_writel(ofb->fbi, FBR1, ofb->fbi->fdadr[DMA_OV1] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) - pr_warning("%s: timeout disabling overlay1\n", __func__); + pr_warn("%s: timeout disabling overlay1\n", __func__); lcd_writel(ofb->fbi, LCCR5, lccr5); } @@ -710,7 +710,7 @@ static void overlay2fb_disable(struct pxafb_layer *ofb) lcd_writel(ofb->fbi, FBR4, ofb->fbi->fdadr[DMA_OV2_Cr] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) - pr_warning("%s: timeout disabling overlay2\n", __func__); + pr_warn("%s: timeout disabling overlay2\n", __func__); } static struct pxafb_layer_ops ofb_ops[] = { @@ -1187,8 +1187,7 @@ int pxafb_smart_flush(struct fb_info *info) lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) { - pr_warning("%s: timeout waiting for command done\n", - __func__); + pr_warn("%s: timeout waiting for command done\n", __func__); ret = -ETIMEDOUT; } diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index d80bc8a3200f..67e314fdd947 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info, info->fbmem = ioremap(res->start, resource_size(res)); if (info->fbmem == NULL) { dev_err(dev, "cannot remap framebuffer\n"); + ret = -ENXIO; goto err_mem_res; } diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index e9c2f7ba3c8e..6a3c353de7c3 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = { static int dlfb_select_std_channel(struct dlfb_data *dev) { int ret; - 
u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, + void *buf; + static const u8 set_def_chn[] = { + 0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2 }; + buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); + + if (!buf) + return -ENOMEM; + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, - set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + + kfree(buf); + return ret; } diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 3ee309c50b2d..46f63960fa9e 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -18,6 +18,8 @@ * frame buffer. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/console.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -380,10 +382,18 @@ static int xenfb_probe(struct xenbus_device *dev, video[KPARAM_MEM] = val; } + video[KPARAM_WIDTH] = xenbus_read_unsigned(dev->otherend, "width", + video[KPARAM_WIDTH]); + video[KPARAM_HEIGHT] = xenbus_read_unsigned(dev->otherend, "height", + video[KPARAM_HEIGHT]); + /* If requested res does not fit in available memory, use default */ fb_size = video[KPARAM_MEM] * 1024 * 1024; if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8 > fb_size) { + pr_warn("display parameters %d,%d,%d invalid, use defaults\n", + video[KPARAM_MEM], video[KPARAM_WIDTH], + video[KPARAM_HEIGHT]); video[KPARAM_WIDTH] = XENFB_WIDTH; video[KPARAM_HEIGHT] = XENFB_HEIGHT; fb_size = XENFB_DEFAULT_FB_LEN; diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c index b6bc4a0bda2a..4d50bfd13e7c 100644 --- a/drivers/video/logo/logo.c +++ b/drivers/video/logo/logo.c @@ -34,7 +34,7 @@ static int __init fb_logo_late_init(void) return 0; } -late_initcall(fb_logo_late_init); +late_initcall_sync(fb_logo_late_init); /* logo's are marked __initdata. Use __ref to tell * modpost that it is intended that this function uses data diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 34adf9b9c053..408c174ef0d5 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -418,8 +418,7 @@ static int init_vqs(struct virtio_balloon *vb) * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 
3 : 2; - err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names, - NULL); + err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL); if (err) return err; diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 79f1293cda93..3a0468f2ceb0 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -173,8 +173,7 @@ static int virtinput_init_vqs(struct virtio_input *vi) static const char * const names[] = { "events", "status" }; int err; - err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names, - NULL); + err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL); if (err) return err; vi->evt = vqs[0]; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 78343b8f9034..74dc7170fd35 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -351,7 +351,7 @@ static void vm_del_vqs(struct virtio_device *vdev) static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct virtio_mmio_vq_info *info; @@ -388,7 +388,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, /* Create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev, - true, true, vm_notify, callback, name); + true, true, ctx, vm_notify, callback, name); if (!vq) { err = -ENOMEM; goto error_new_virtqueue; @@ -447,6 +447,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); @@ -459,7 +460,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return err; for (i = 0; i < nvqs; ++i) { - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); return PTR_ERR(vqs[i]); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 698d5d06fa03..007a4f366086 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -172,6 +172,7 @@ error: static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -183,7 +184,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, if (!info) return ERR_PTR(-ENOMEM); - vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx, msix_vec); if (IS_ERR(vq)) goto out_info; @@ -274,6 +275,7 @@ void vp_del_vqs(struct virtio_device *vdev) static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], bool per_vq_vectors, + const bool *ctx, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -315,6 +317,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? 
ctx[i] : false, msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); @@ -345,7 +348,7 @@ error_find: static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], const bool *ctx) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; @@ -367,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, continue; } vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false, VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); @@ -383,20 +387,21 @@ out_del_vqs: /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { int err; /* Try MSI-X with one vector per queue. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc); if (!err) return 0; /* Finally fall back to regular interrupts. */ - return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); + return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx); } const char *vp_bus_name(struct virtio_device *vdev) diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index e96334aec1e0..135ee3cf7175 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -102,6 +102,7 @@ struct virtio_pci_device { unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec); void (*del_vq)(struct virtio_pci_vq_info *info); @@ -131,7 +132,8 @@ void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc); + const char * const names[], const bool *ctx, + struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 4bfa48fb1324..2780886e8ba3 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -116,6 +116,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtqueue *vq; @@ -135,7 +136,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, - true, false, vp_notify, callback, name); + true, false, ctx, + vp_notify, callback, name); if (!vq) return ERR_PTR(-ENOMEM); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 8978f109d2d7..2555d80f6eec 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -297,6 +297,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, unsigned index, void 
(*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; @@ -328,7 +329,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, - true, true, vp_notify, callback, name); + true, true, ctx, + vp_notify, callback, name); if (!vq) return ERR_PTR(-ENOMEM); @@ -387,12 +389,14 @@ err_map_notify: } static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc); if (rc) return rc; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 409aeaa49246..5e1b548828e6 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -263,6 +263,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, unsigned int out_sgs, unsigned int in_sgs, void *data, + void *ctx, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -275,6 +276,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, START_USE(vq); BUG_ON(data == NULL); + BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); @@ -389,6 +391,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->desc_state[head].data = data; if (indirect) vq->desc_state[head].indir_desc = desc; + if (ctx) + vq->desc_state[head].indir_desc = ctx; /* Put entry in available array (but don't update avail->idx until they * do sync). */ @@ -461,7 +465,8 @@ int virtqueue_add_sgs(struct virtqueue *_vq, for (sg = sgs[i]; sg; sg = sg_next(sg)) total_sg++; } - return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, + data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); @@ -483,7 +488,7 @@ int virtqueue_add_outbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); + return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); @@ -505,11 +510,35 @@ int virtqueue_add_inbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** + * virtqueue_add_inbuf_ctx - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side + * @data: the token identifying the buffer. + * @ctx: extra context for the token + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
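The new per-buffer context is opt-in: a device marks a queue as carrying context through the bool array now threaded into find_vqs() (which also disables indirect descriptors on that queue, hence the BUG_ON above), and then pairs virtqueue_add_inbuf_ctx() with virtqueue_get_buf_ctx(). A rough sketch of an RX path that uses the context to remember how much it posted; the my_* names are illustrative only.

#include <linux/scatterlist.h>
#include <linux/virtio.h>

struct my_rx_meta {
	unsigned int posted_len;
};

/* Post a receive buffer together with its metadata as the context */
static int my_post_rx(struct virtqueue *vq, void *buf, unsigned int len,
		      struct my_rx_meta *meta)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	meta->posted_len = len;
	return virtqueue_add_inbuf_ctx(vq, &sg, 1, buf, meta, GFP_ATOMIC);
}

/* On completion both the token and the context come back */
static void my_rx_done(struct virtqueue *vq)
{
	unsigned int used_len;
	void *ctx;
	void *buf;

	while ((buf = virtqueue_get_buf_ctx(vq, &used_len, &ctx))) {
		struct my_rx_meta *meta = ctx;

		pr_debug("used %u of %u posted bytes\n",
			 used_len, meta->posted_len);
		/* hand 'buf' up the stack, recycle 'meta' */
	}
}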
+ */ +int virtqueue_add_inbuf_ctx(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + void *ctx, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); + +/** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * @@ -598,7 +627,8 @@ bool virtqueue_kick(struct virtqueue *vq) } EXPORT_SYMBOL_GPL(virtqueue_kick); -static void detach_buf(struct vring_virtqueue *vq, unsigned int head) +static void detach_buf(struct vring_virtqueue *vq, unsigned int head, + void **ctx) { unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); @@ -622,10 +652,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) /* Plus final descriptor */ vq->vq.num_free++; - /* Free the indirect table, if any, now that it's unmapped. */ - if (vq->desc_state[head].indir_desc) { + if (vq->indirect) { struct vring_desc *indir_desc = vq->desc_state[head].indir_desc; - u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); + u32 len; + + /* Free the indirect table, if any, now that it's unmapped. */ + if (!indir_desc) + return; + + len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); BUG_ON(!(vq->vring.desc[head].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))); @@ -634,8 +669,10 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) for (j = 0; j < len / sizeof(struct vring_desc); j++) vring_unmap_one(vq, &indir_desc[j]); - kfree(vq->desc_state[head].indir_desc); + kfree(indir_desc); vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; } } @@ -660,7 +697,8 @@ static inline bool more_used(const struct vring_virtqueue *vq) * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_*(). */ -void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, + void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; @@ -698,7 +736,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) /* detach_buf clears data, so grab it now. */ ret = vq->desc_state[i].data; - detach_buf(vq, i); + detach_buf(vq, i, ctx); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before @@ -715,8 +753,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) END_USE(vq); return ret; } -EXPORT_SYMBOL_GPL(virtqueue_get_buf); +EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); +void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +{ + return virtqueue_get_buf_ctx(_vq, len, NULL); +} +EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. @@ -878,7 +921,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) continue; /* detach_buf clears data, so grab it now. 
@@ -878,7 +921,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->desc_state[i].data;
-               detach_buf(vq, i);
+               detach_buf(vq, i, NULL);
                vq->avail_idx_shadow--;
                vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
                END_USE(vq);
@@ -916,6 +959,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        struct vring vring,
                                        struct virtio_device *vdev,
                                        bool weak_barriers,
+                                       bool context,
                                        bool (*notify)(struct virtqueue *),
                                        void (*callback)(struct virtqueue *),
                                        const char *name)
@@ -950,7 +994,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        vq->last_add_time_valid = false;
 #endif
 
-       vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+       vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+               !context;
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
        /* No callback?  Tell other side not to bother us. */
@@ -1019,6 +1064,7 @@ struct virtqueue *vring_create_virtqueue(
        struct virtio_device *vdev,
        bool weak_barriers,
        bool may_reduce_num,
+       bool context,
        bool (*notify)(struct virtqueue *),
        void (*callback)(struct virtqueue *),
        const char *name)
@@ -1058,7 +1104,7 @@ struct virtqueue *vring_create_virtqueue(
        queue_size_in_bytes = vring_size(num, vring_align);
        vring_init(&vring, num, queue, vring_align);
 
-       vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+       vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
                                   notify, callback, name);
        if (!vq) {
                vring_free_queue(vdev, queue_size_in_bytes, queue,
@@ -1079,6 +1125,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
+                                     bool context,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *vq),
                                      void (*callback)(struct virtqueue *vq),
@@ -1086,7 +1133,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 {
        struct vring vring;
        vring_init(&vring, num, pages, vring_align);
-       return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+       return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
                                     notify, callback, name);
 }
 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
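For illustration only (not part of the patch): a sketch of how a transport might plumb the new context flag down to the ring. The example_* names are hypothetical; the call shape follows the setup_vq() hunk above, and passing context = true disables indirect descriptors for that ring so the per-buffer ctx cookie can be stored instead.

/*
 * Illustrative sketch, not part of this diff: a hypothetical transport
 * forwarding a per-queue "ctx" flag into vring_create_virtqueue().
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/cache.h>

static bool example_notify(struct virtqueue *vq)
{
        /* transport-specific kick, e.g. a doorbell register write */
        return true;
}

static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
                                          unsigned int index,
                                          unsigned int num,
                                          void (*callback)(struct virtqueue *),
                                          const char *name, bool ctx)
{
        /*
         * weak_barriers = true, may_reduce_num = true; ctx selects the
         * new per-virtqueue context behaviour added by this series.
         */
        return vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
                                      true, true, ctx,
                                      example_notify, callback, name);
}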
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 6d03e8e30f8b..6c3f78e45c26 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -289,7 +289,7 @@ MODULE_DESCRIPTION("sma cpu5 watchdog driver");
 MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog");
 MODULE_LICENSE("GPL");
 
-module_param(port, int, 0);
+module_param_hw(port, int, ioport, 0);
 MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91");
 
 module_param(verbose, int, 0);
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 23ee53240c4c..38e96712264f 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -97,9 +97,9 @@ MODULE_PARM_DESC(nowayout,
 
 #define WDT_TIMER_CFG          0xf3
 
-module_param(io, int, 0);
+module_param_hw(io, int, ioport, 0);
 MODULE_PARM_DESC(io, "Eurotech WDT io port (default=0x3f0)");
-module_param(irq, int, 0);
+module_param_hw(irq, int, irq, 0);
 MODULE_PARM_DESC(irq, "Eurotech WDT irq (default=10)");
 module_param(ev, charp, 0);
 MODULE_PARM_DESC(ev, "Eurotech WDT event type (default is `int')");
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 9f15dd9435d1..06a892e36a8d 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -579,7 +579,7 @@ MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
 MODULE_DESCRIPTION("PC87413 WDT driver");
 MODULE_LICENSE("GPL");
 
-module_param(io, int, 0);
+module_param_hw(io, int, ioport, 0);
 MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(IO_DEFAULT) ").");
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 131193a7acdf..b34d3d5ba632 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(isapnp,
        "When set to 0 driver ISA PnP support will be disabled");
 #endif
 
-module_param(io, int, 0);
+module_param_hw(io, int, ioport, 0);
 MODULE_PARM_DESC(io, "io port");
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "range is 0-255 minutes, default is 1");
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index e0206b5b7d89..e481fbbc4ae7 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -78,9 +78,9 @@ static int irq = 11;
 
 static DEFINE_SPINLOCK(wdt_lock);
 
-module_param(io, int, 0);
+module_param_hw(io, int, ioport, 0);
 MODULE_PARM_DESC(io, "WDT io port (default=0x240)");
-module_param(irq, int, 0);
+module_param_hw(irq, int, irq, 0);
 MODULE_PARM_DESC(irq, "WDT irq (default=11)");
 
 /* Support for the Fan Tachometer on the WDT501-P */
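For illustration only (not part of the patch): the watchdog hunks above all follow the same module_param_hw(name, type, hwtype, perm) pattern, which annotates a module parameter as describing a hardware resource (ioport, irq, ...) rather than an ordinary tunable. A minimal sketch of that pattern, with hypothetical driver defaults:

/*
 * Illustrative sketch, not part of this diff.  Defaults and descriptions
 * are hypothetical; only the module_param_hw() usage mirrors the
 * conversions above.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int io = 0x240;          /* hypothetical default I/O base */
static int irq = 11;            /* hypothetical default IRQ line */

module_param_hw(io, int, ioport, 0);
MODULE_PARM_DESC(io, "base I/O port of the example device");
module_param_hw(irq, int, irq, 0);
MODULE_PARM_DESC(irq, "IRQ line of the example device");

MODULE_LICENSE("GPL");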