Merge by hand (conflicts in sd.c)

James Bottomley 2005-09-06 17:52:54 -05:00
commit 17fa53da12
49 changed files with 2487 additions and 760 deletions


@ -373,13 +373,11 @@ Summary:
scsi_activate_tcq - turn on tag command queueing
scsi_add_device - creates new scsi device (lu) instance
scsi_add_host - perform sysfs registration and SCSI bus scan.
scsi_add_timer - (re-)start timer on a SCSI command.
scsi_adjust_queue_depth - change the queue depth on a SCSI device
scsi_assign_lock - replace default host_lock with given lock
scsi_bios_ptable - return copy of block device's partition table
scsi_block_requests - prevent further commands being queued to given host
scsi_deactivate_tcq - turn off tag command queueing
scsi_delete_timer - cancel timer on a SCSI command.
scsi_host_alloc - return a new scsi_host instance whose refcount==1
scsi_host_get - increments Scsi_Host instance's refcount
scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
@ -457,27 +455,6 @@ struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
/**
* scsi_add_timer - (re-)start timer on a SCSI command.
* @scmd: pointer to scsi command instance
* @timeout: duration of timeout in "jiffies"
* @complete: pointer to function to call if timeout expires
*
* Returns nothing
*
* Might block: no
*
* Notes: Each scsi command has its own timer, and as it is added
* to the queue, we set up the timer. When the command completes,
* we cancel the timer. An LLD can use this function to change
* the existing timeout value.
*
* Defined in: drivers/scsi/scsi_error.c
**/
void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
void (*complete)(struct scsi_cmnd *))
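/*
 * Illustrative sketch only, not part of this commit: an LLD that knows a
 * particular request will run long can use scsi_add_timer() to re-arm the
 * mid-layer timer before issuing it.  example_lld_timed_out() and
 * example_lld_queue_slow_command() are assumed names for illustration.
 */
static void example_lld_timed_out(struct scsi_cmnd *scmd)
{
	/* the timeout expired before the command completed */
}

static void example_lld_queue_slow_command(struct scsi_cmnd *scmd)
{
	scsi_add_timer(scmd, 60 * HZ, example_lld_timed_out);	/* 60 seconds, in jiffies */
	/* ... now hand scmd to the hardware ... */
}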
/**
* scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device
* @sdev: pointer to SCSI device to change queue depth on
@ -565,24 +542,6 @@ void scsi_block_requests(struct Scsi_Host * shost)
void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
/**
* scsi_delete_timer - cancel timer on a SCSI command.
* @scmd: pointer to scsi command instance
*
* Returns 1 if able to cancel timer else 0 (i.e. too late or already
* cancelled).
*
* Might block: no [may in the future if it invokes del_timer_sync()]
*
* Notes: All commands issued by upper levels already have a timeout
* associated with them. An LLD can use this function to cancel the
* timer.
*
* Defined in: drivers/scsi/scsi_error.c
**/
int scsi_delete_timer(struct scsi_cmnd *scmd)
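/*
 * Illustrative sketch only, not part of this commit: the usual completion
 * pattern is to cancel the per-command timer before completing the command;
 * a return of 0 means the timeout handler already fired and error handling
 * now owns the command.  example_complete_command() is an assumed name, and
 * scmd->scsi_done is assumed to be the completion hook of this kernel
 * generation.
 */
static void example_complete_command(struct scsi_cmnd *scmd)
{
	if (!scsi_delete_timer(scmd))
		return;			/* too late: timeout/error handling owns scmd */
	scmd->scsi_done(scmd);		/* timer cancelled, complete normally */
}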
/**
* scsi_host_alloc - create a scsi host adapter instance and perform basic
* initialization.


@ -22,11 +22,26 @@
/* This is a private structure used to tie the classdev and the
* container .. it should never be visible outside this file */
struct internal_container {
struct list_head node;
struct klist_node node;
struct attribute_container *cont;
struct class_device classdev;
};
static void internal_container_klist_get(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
class_device_get(&ic->classdev);
}
static void internal_container_klist_put(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
class_device_put(&ic->classdev);
}
/**
* attribute_container_classdev_to_container - given a classdev, return the container
*
@ -57,8 +72,8 @@ int
attribute_container_register(struct attribute_container *cont)
{
INIT_LIST_HEAD(&cont->node);
INIT_LIST_HEAD(&cont->containers);
spin_lock_init(&cont->containers_lock);
klist_init(&cont->containers,internal_container_klist_get,
internal_container_klist_put);
down(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list);
@ -78,13 +93,13 @@ attribute_container_unregister(struct attribute_container *cont)
{
int retval = -EBUSY;
down(&attribute_container_mutex);
spin_lock(&cont->containers_lock);
if (!list_empty(&cont->containers))
spin_lock(&cont->containers.k_lock);
if (!list_empty(&cont->containers.k_list))
goto out;
retval = 0;
list_del(&cont->node);
out:
spin_unlock(&cont->containers_lock);
spin_unlock(&cont->containers.k_lock);
up(&attribute_container_mutex);
return retval;
@ -143,7 +158,6 @@ attribute_container_add_device(struct device *dev,
continue;
}
memset(ic, 0, sizeof(struct internal_container));
INIT_LIST_HEAD(&ic->node);
ic->cont = cont;
class_device_initialize(&ic->classdev);
ic->classdev.dev = get_device(dev);
@ -154,13 +168,22 @@ attribute_container_add_device(struct device *dev,
fn(cont, dev, &ic->classdev);
else
attribute_container_add_class_device(&ic->classdev);
spin_lock(&cont->containers_lock);
list_add_tail(&ic->node, &cont->containers);
spin_unlock(&cont->containers_lock);
klist_add_tail(&ic->node, &cont->containers);
}
up(&attribute_container_mutex);
}
/* FIXME: can't break out of this unless klist_iter_exit is also
* called before doing the break
*/
#define klist_for_each_entry(pos, head, member, iter) \
for (klist_iter_init(head, iter); (pos = ({ \
struct klist_node *n = klist_next(iter); \
n ? container_of(n, typeof(*pos), member) : \
({ klist_iter_exit(iter) ; NULL; }); \
}) ) != NULL; )
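/*
 * Illustrative sketch only, not part of this commit: because the macro above
 * only calls klist_iter_exit() when the iteration runs to completion, any
 * early break has to drop the iterator reference itself (the same pattern
 * appears in attribute_container_find_class_device() below).
 * example_find_container() is an assumed name.
 */
static struct internal_container *
example_find_container(struct attribute_container *cont, struct device *dev)
{
	struct internal_container *ic, *found = NULL;
	struct klist_iter iter;

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic->classdev.dev == dev) {
			found = ic;
			klist_iter_exit(&iter);	/* must exit before breaking out */
			break;
		}
	}
	return found;
}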
/**
* attribute_container_remove_device - make device eligible for removal.
*
@ -187,18 +210,19 @@ attribute_container_remove_device(struct device *dev,
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp;
struct internal_container *ic;
struct klist_iter iter;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
spin_lock(&cont->containers_lock);
list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev != ic->classdev.dev)
continue;
list_del(&ic->node);
klist_del(&ic->node);
if (fn)
fn(cont, dev, &ic->classdev);
else {
@ -206,7 +230,6 @@ attribute_container_remove_device(struct device *dev,
class_device_unregister(&ic->classdev);
}
}
spin_unlock(&cont->containers_lock);
}
up(&attribute_container_mutex);
}
@ -232,7 +255,8 @@ attribute_container_device_trigger(struct device *dev,
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp;
struct internal_container *ic;
struct klist_iter iter;
if (!cont->match(cont, dev))
continue;
@ -242,12 +266,10 @@ attribute_container_device_trigger(struct device *dev,
continue;
}
spin_lock(&cont->containers_lock);
list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev == ic->classdev.dev)
fn(cont, dev, &ic->classdev);
}
spin_unlock(&cont->containers_lock);
}
up(&attribute_container_mutex);
}
@ -397,15 +419,16 @@ attribute_container_find_class_device(struct attribute_container *cont,
{
struct class_device *cdev = NULL;
struct internal_container *ic;
struct klist_iter iter;
spin_lock(&cont->containers_lock);
list_for_each_entry(ic, &cont->containers, node) {
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (ic->classdev.dev == dev) {
cdev = &ic->classdev;
/* FIXME: must exit iterator then break */
klist_iter_exit(&iter);
break;
}
}
spin_unlock(&cont->containers_lock);
return cdev;
}


@ -6,7 +6,7 @@
* Title: MPI Message independent structures and definitions
* Creation Date: July 27, 2000
*
* mpi.h Version: 01.05.07
* mpi.h Version: 01.05.08
*
* Version History
* ---------------
@ -71,6 +71,9 @@
* 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Removed EEDP IOCStatus codes.
* 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Added EEDP IOCStatus codes.
* --------------------------------------------------------------------------
*/
@ -101,7 +104,7 @@
/* Note: The major versions of 0xe0 through 0xff are reserved */
/* versioning for this MPI header set */
#define MPI_HEADER_VERSION_UNIT (0x09)
#define MPI_HEADER_VERSION_UNIT (0x0A)
#define MPI_HEADER_VERSION_DEV (0x00)
#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
@ -292,10 +295,13 @@
#define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D)
#define MPI_FUNCTION_DIAG_RELEASE (0x1E)
#define MPI_FUNCTION_SCSI_IO_32 (0x1F)
#define MPI_FUNCTION_LAN_SEND (0x20)
#define MPI_FUNCTION_LAN_RECEIVE (0x21)
#define MPI_FUNCTION_LAN_RESET (0x22)
#define MPI_FUNCTION_TARGET_ASSIST_EXTENDED (0x23)
#define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
#define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
@ -680,6 +686,15 @@ typedef struct _MSG_DEFAULT_REPLY
#define MPI_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
/****************************************************************************/
/* For use by SCSI Initiator and SCSI Target end-to-end data protection */
/****************************************************************************/
#define MPI_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
#define MPI_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
/****************************************************************************/
/* SCSI Target values */
/****************************************************************************/


@ -6,7 +6,7 @@
* Title: MPI Config message, structures, and Pages
* Creation Date: July 27, 2000
*
* mpi_cnfg.h Version: 01.05.08
* mpi_cnfg.h Version: 01.05.09
*
* Version History
* ---------------
@ -232,6 +232,23 @@
* New physical mapping mode in SAS IO Unit Page 2.
* Added CONFIG_PAGE_SAS_ENCLOSURE_0.
* Added Slot and Enclosure fields to SAS Device Page 0.
* 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
* Added more RAID type defines to IOC Page 2.
* Added Port Enable Delay settings to BIOS Page 1.
* Added Bad Block Table Full define to RAID Volume Page 0.
* Added Previous State defines to RAID Physical Disk
* Page 0.
* Added Max Sata Targets define for DiscoveryStatus field
* of SAS IO Unit Page 0.
* Added Device Self Test to Control Flags of SAS IO Unit
* Page 1.
* Added Direct Attach Starting Slot Number define for SAS
* IO Unit Page 2.
* Added new fields in SAS Device Page 2 for enclosure
* mapping.
* Added OwnerDevHandle and Flags field to SAS PHY Page 0.
* Added IOC GPIO Flags define to SAS Enclosure Page 0.
* Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
* --------------------------------------------------------------------------
*/
@ -477,6 +494,7 @@ typedef struct _MSG_CONFIG_REPLY
#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626)
#define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642)
#define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640)
#define MPI_MANUFACTPAGE_DEVICEID_FC949ES (0x0646)
/* SCSI */
#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030)
#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031)
@ -769,9 +787,13 @@ typedef struct _CONFIG_PAGE_IOC_1
} CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
IOCPage1_t, MPI_POINTER pIOCPage1_t;
#define MPI_IOCPAGE1_PAGEVERSION (0x02)
#define MPI_IOCPAGE1_PAGEVERSION (0x03)
/* defines for the Flags field */
#define MPI_IOCPAGE1_EEDP_MODE_MASK (0x07000000)
#define MPI_IOCPAGE1_EEDP_MODE_OFF (0x00000000)
#define MPI_IOCPAGE1_EEDP_MODE_T10 (0x01000000)
#define MPI_IOCPAGE1_EEDP_MODE_LSI_1 (0x02000000)
#define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010)
#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001)
@ -795,6 +817,11 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
#define MPI_RAID_VOL_TYPE_IS (0x00)
#define MPI_RAID_VOL_TYPE_IME (0x01)
#define MPI_RAID_VOL_TYPE_IM (0x02)
#define MPI_RAID_VOL_TYPE_RAID_5 (0x03)
#define MPI_RAID_VOL_TYPE_RAID_6 (0x04)
#define MPI_RAID_VOL_TYPE_RAID_10 (0x05)
#define MPI_RAID_VOL_TYPE_RAID_50 (0x06)
#define MPI_RAID_VOL_TYPE_UNKNOWN (0xFF)
/* IOC Page 2 Volume Flags values */
@ -820,13 +847,17 @@ typedef struct _CONFIG_PAGE_IOC_2
} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
IOCPage2_t, MPI_POINTER pIOCPage2_t;
#define MPI_IOCPAGE2_PAGEVERSION (0x02)
#define MPI_IOCPAGE2_PAGEVERSION (0x03)
/* IOC Page 2 Capabilities flags */
#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001)
#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002)
#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_6_SUPPORT (0x00000010)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000020)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_50_SUPPORT (0x00000040)
#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000)
#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000)
#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000)
@ -945,7 +976,7 @@ typedef struct _CONFIG_PAGE_BIOS_1
} CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
#define MPI_BIOSPAGE1_PAGEVERSION (0x01)
#define MPI_BIOSPAGE1_PAGEVERSION (0x02)
/* values for the BiosOptions field */
#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400)
@ -954,6 +985,8 @@ typedef struct _CONFIG_PAGE_BIOS_1
#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
/* values for the IOCSettings field */
#define MPI_BIOSPAGE1_IOCSET_MASK_PORT_ENABLE_DELAY (0x00F00000)
#define MPI_BIOSPAGE1_IOCSET_SHIFT_PORT_ENABLE_DELAY (20)
#define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
#define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
#define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
@ -1167,6 +1200,7 @@ typedef struct _CONFIG_PAGE_BIOS_2
#define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03)
#define MPI_BIOSPAGE2_FORM_FC_WWN (0x04)
#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05)
#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
/****************************************************************************
@ -1957,11 +1991,11 @@ typedef struct _RAID_VOL0_STATUS
RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
/* RAID Volume Page 0 VolumeStatus defines */
#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01)
#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02)
#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04)
#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08)
#define MPI_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x10)
#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00)
#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01)
@ -2025,7 +2059,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x04)
#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x05)
/* values for RAID Volume Page 0 InactiveStatus field */
#define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
@ -2104,6 +2138,8 @@ typedef struct _RAID_PHYS_DISK0_STATUS
#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
#define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04)
#define MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00)
#define MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x08)
#define MPI_PHYSDISK0_STATUS_ONLINE (0x00)
#define MPI_PHYSDISK0_STATUS_MISSING (0x01)
@ -2132,7 +2168,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
} CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x01)
#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x02)
typedef struct _RAID_PHYS_DISK1_PATH
@ -2263,7 +2299,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x02)
#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x03)
/* values for SAS IO Unit Page 0 PortFlags */
#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08)
@ -2299,6 +2335,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
@ -2336,6 +2373,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x04)
/* values for SAS IO Unit Page 1 ControlFlags */
#define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
@ -2345,9 +2383,8 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x10)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x02)
#define MPI_SAS_IOUNIT1_CONTROL_AUTO_PORT_SAME_SAS_ADDR (0x0100)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
@ -2390,7 +2427,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
} CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x03)
#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x04)
/* values for SAS IO Unit Page 2 Status field */
#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
@ -2406,6 +2443,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
#define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02)
#define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10)
#define MPI_SAS_IOUNIT2_FLAGS_DA_STARTING_SLOT (0x20)
typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
@ -2584,11 +2622,19 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
{
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
U64 PhysicalIdentifier; /* 08h */
U32 Reserved1; /* 10h */
U32 EnclosureMapping; /* 10h */
} CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2,
SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t;
#define MPI_SASDEVICE2_PAGEVERSION (0x00)
#define MPI_SASDEVICE2_PAGEVERSION (0x01)
/* defines for SAS Device Page 2 EnclosureMapping field */
#define MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT (0x0000000F)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT (0)
#define MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS (0x000007F0)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS (4)
#define MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX (0x001FF800)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX (11)
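/*
 * Illustrative sketch only, not part of this commit: unpacking the new
 * EnclosureMapping word of SAS Device Page 2 with the mask/shift pairs
 * defined above.  example_decode_enclosure_mapping() is an assumed name.
 */
static void example_decode_enclosure_mapping(U32 enc_map)
{
	U32 missing_count = (enc_map & MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT)
				>> MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT;
	U32 num_slots = (enc_map & MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS)
				>> MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS;
	U32 start_index = (enc_map & MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX)
				>> MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX;
	(void)missing_count; (void)num_slots; (void)start_index;
}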
/****************************************************************************
@ -2598,7 +2644,8 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
typedef struct _CONFIG_PAGE_SAS_PHY_0
{
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
U32 Reserved1; /* 08h */
U16 OwnerDevHandle; /* 08h */
U16 Reserved1; /* 0Ah */
U64 SASAddress; /* 0Ch */
U16 AttachedDevHandle; /* 14h */
U8 AttachedPhyIdentifier; /* 16h */
@ -2607,12 +2654,12 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
U8 ProgrammedLinkRate; /* 20h */
U8 HwLinkRate; /* 21h */
U8 ChangeCount; /* 22h */
U8 Reserved3; /* 23h */
U8 Flags; /* 23h */
U32 PhyInfo; /* 24h */
} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
#define MPI_SASPHY0_PAGEVERSION (0x00)
#define MPI_SASPHY0_PAGEVERSION (0x01)
/* values for SAS PHY Page 0 ProgrammedLinkRate field */
#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0)
@ -2632,6 +2679,9 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09)
/* values for SAS PHY Page 0 Flags field */
#define MPI_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
/* values for SAS PHY Page 0 PhyInfo field */
#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000)
@ -2690,7 +2740,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
} CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0,
SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t;
#define MPI_SASENCLOSURE0_PAGEVERSION (0x00)
#define MPI_SASENCLOSURE0_PAGEVERSION (0x01)
/* values for SAS Enclosure Page 0 Flags field */
#define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020)
@ -2702,6 +2752,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
#define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
#define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
/****************************************************************************


@ -6,17 +6,17 @@
Copyright (c) 2000-2005 LSI Logic Corporation.
---------------------------------------
Header Set Release Version: 01.05.09
Header Set Release Version: 01.05.10
Header Set Release Date: 03-11-05
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
mpi.h 01.05.07 01.05.06
mpi_ioc.h 01.05.08 01.05.07
mpi_cnfg.h 01.05.08 01.05.07
mpi_init.h 01.05.04 01.05.03
mpi_targ.h 01.05.04 01.05.03
mpi.h 01.05.08 01.05.07
mpi_ioc.h 01.05.09 01.05.08
mpi_cnfg.h 01.05.09 01.05.08
mpi_init.h 01.05.05 01.05.04
mpi_targ.h 01.05.05 01.05.04
mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 01.05.01
mpi_raid.h 01.05.02 01.05.02
@ -24,7 +24,7 @@
mpi_inb.h 01.05.01 01.05.01
mpi_sas.h 01.05.01 01.05.01
mpi_type.h 01.05.01 01.05.01
mpi_history.txt 01.05.09 01.05.08
mpi_history.txt 01.05.09 01.05.09
* Date Version Description
@ -88,6 +88,9 @@ mpi.h
* 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Removed EEDP IOCStatus codes.
* 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Added EEDP IOCStatus codes.
* --------------------------------------------------------------------------
mpi_ioc.h
@ -159,6 +162,8 @@ mpi_ioc.h
* Reply and IOC Init Request.
* 03-11-05 01.05.08 Added family code for 1068E family.
* Removed IOCFacts Reply EEDP Capability bit.
* 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
* Added Max SATA Targets to SAS Discovery Error event.
* --------------------------------------------------------------------------
mpi_cnfg.h
@ -380,6 +385,23 @@ mpi_cnfg.h
* New physical mapping mode in SAS IO Unit Page 2.
* Added CONFIG_PAGE_SAS_ENCLOSURE_0.
* Added Slot and Enclosure fields to SAS Device Page 0.
* 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
* Added more RAID type defines to IOC Page 2.
* Added Port Enable Delay settings to BIOS Page 1.
* Added Bad Block Table Full define to RAID Volume Page 0.
* Added Previous State defines to RAID Physical Disk
* Page 0.
* Added Max Sata Targets define for DiscoveryStatus field
* of SAS IO Unit Page 0.
* Added Device Self Test to Control Flags of SAS IO Unit
* Page 1.
* Added Direct Attach Starting Slot Number define for SAS
* IO Unit Page 2.
* Added new fields in SAS Device Page 2 for enclosure
* mapping.
* Added OwnerDevHandle and Flags field to SAS PHY Page 0.
* Added IOC GPIO Flags define to SAS Enclosure Page 0.
* Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
* --------------------------------------------------------------------------
mpi_init.h
@ -418,6 +440,8 @@ mpi_init.h
* Modified SCSI Enclosure Processor Request and Reply to
* support Enclosure/Slot addressing rather than WWID
* addressing.
* 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
* Added four new defines for SEP SlotStatus.
* --------------------------------------------------------------------------
mpi_targ.h
@ -461,6 +485,7 @@ mpi_targ.h
* 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
* 02-22-05 01.05.03 Changed a comment.
* 03-11-05 01.05.04 Removed TargetAssistExtended Request.
* 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
* --------------------------------------------------------------------------
mpi_fc.h
@ -571,20 +596,20 @@ mpi_type.h
mpi_history.txt Parts list history
Filename 01.05.09
---------- --------
mpi.h 01.05.07
mpi_ioc.h 01.05.08
mpi_cnfg.h 01.05.08
mpi_init.h 01.05.04
mpi_targ.h 01.05.04
mpi_fc.h 01.05.01
mpi_lan.h 01.05.01
mpi_raid.h 01.05.02
mpi_tool.h 01.05.03
mpi_inb.h 01.05.01
mpi_sas.h 01.05.01
mpi_type.h 01.05.01
Filename 01.05.10 01.05.09
---------- -------- --------
mpi.h 01.05.08 01.05.07
mpi_ioc.h 01.05.09 01.05.08
mpi_cnfg.h 01.05.09 01.05.08
mpi_init.h 01.05.05 01.05.04
mpi_targ.h 01.05.05 01.05.04
mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 01.05.01
mpi_raid.h 01.05.02 01.05.02
mpi_tool.h 01.05.03 01.05.03
mpi_inb.h 01.05.01 01.05.01
mpi_sas.h 01.05.01 01.05.01
mpi_type.h 01.05.01 01.05.01
Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03
---------- -------- -------- -------- -------- -------- --------


@ -6,7 +6,7 @@
* Title: MPI initiator mode messages and structures
* Creation Date: June 8, 2000
*
* mpi_init.h Version: 01.05.04
* mpi_init.h Version: 01.05.05
*
* Version History
* ---------------
@ -48,6 +48,8 @@
* Modified SCSI Enclosure Processor Request and Reply to
* support Enclosure/Slot addressing rather than WWID
* addressing.
* 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
* Added four new defines for SEP SlotStatus.
* --------------------------------------------------------------------------
*/
@ -202,6 +204,197 @@ typedef struct _MSG_SCSI_IO_REPLY
#define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF)
/****************************************************************************/
/* SCSI IO 32 messages and associated structures */
/****************************************************************************/
typedef struct
{
U8 CDB[20]; /* 00h */
U32 PrimaryReferenceTag; /* 14h */
U16 PrimaryApplicationTag; /* 18h */
U16 PrimaryApplicationTagMask; /* 1Ah */
U32 TransferLength; /* 1Ch */
} MPI_SCSI_IO32_CDB_EEDP32, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP32,
MpiScsiIo32CdbEedp32_t, MPI_POINTER pMpiScsiIo32CdbEedp32_t;
typedef struct
{
U8 CDB[16]; /* 00h */
U32 DataLength; /* 10h */
U32 PrimaryReferenceTag; /* 14h */
U16 PrimaryApplicationTag; /* 18h */
U16 PrimaryApplicationTagMask; /* 1Ah */
U32 TransferLength; /* 1Ch */
} MPI_SCSI_IO32_CDB_EEDP16, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP16,
MpiScsiIo32CdbEedp16_t, MPI_POINTER pMpiScsiIo32CdbEedp16_t;
typedef union
{
U8 CDB32[32];
MPI_SCSI_IO32_CDB_EEDP32 EEDP32;
MPI_SCSI_IO32_CDB_EEDP16 EEDP16;
SGE_SIMPLE_UNION SGE;
} MPI_SCSI_IO32_CDB_UNION, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_UNION,
MpiScsiIo32Cdb_t, MPI_POINTER pMpiScsiIo32Cdb_t;
typedef struct
{
U8 TargetID; /* 00h */
U8 Bus; /* 01h */
U16 Reserved1; /* 02h */
U32 Reserved2; /* 04h */
} MPI_SCSI_IO32_BUS_TARGET_ID_FORM, MPI_POINTER PTR_MPI_SCSI_IO32_BUS_TARGET_ID_FORM,
MpiScsiIo32BusTargetIdForm_t, MPI_POINTER pMpiScsiIo32BusTargetIdForm_t;
typedef union
{
MPI_SCSI_IO32_BUS_TARGET_ID_FORM SCSIID;
U64 WWID;
} MPI_SCSI_IO32_ADDRESS, MPI_POINTER PTR_MPI_SCSI_IO32_ADDRESS,
MpiScsiIo32Address_t, MPI_POINTER pMpiScsiIo32Address_t;
typedef struct _MSG_SCSI_IO32_REQUEST
{
U8 Port; /* 00h */
U8 Reserved1; /* 01h */
U8 ChainOffset; /* 02h */
U8 Function; /* 03h */
U8 CDBLength; /* 04h */
U8 SenseBufferLength; /* 05h */
U8 Flags; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U8 LUN[8]; /* 0Ch */
U32 Control; /* 14h */
MPI_SCSI_IO32_CDB_UNION CDB; /* 18h */
U32 DataLength; /* 38h */
U32 BidirectionalDataLength; /* 3Ch */
U32 SecondaryReferenceTag; /* 40h */
U16 SecondaryApplicationTag; /* 44h */
U16 Reserved2; /* 46h */
U16 EEDPFlags; /* 48h */
U16 ApplicationTagTranslationMask; /* 4Ah */
U32 EEDPBlockSize; /* 4Ch */
MPI_SCSI_IO32_ADDRESS DeviceAddress; /* 50h */
U8 SGLOffset0; /* 58h */
U8 SGLOffset1; /* 59h */
U8 SGLOffset2; /* 5Ah */
U8 SGLOffset3; /* 5Bh */
U32 Reserved3; /* 5Ch */
U32 Reserved4; /* 60h */
U32 SenseBufferLowAddr; /* 64h */
SGE_IO_UNION SGL; /* 68h */
} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
/* SCSI IO 32 MsgFlags bits */
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH (0x01)
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_32 (0x00)
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_64 (0x01)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOCATION (0x02)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_HOST (0x00)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_IOC (0x02)
#define MPI_SCSIIO32_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
#define MPI_SCSIIO32_MSGFLGS_SGL_OFFSETS_CHAINS (0x08)
#define MPI_SCSIIO32_MSGFLGS_MULTICAST (0x10)
#define MPI_SCSIIO32_MSGFLGS_BIDIRECTIONAL (0x20)
#define MPI_SCSIIO32_MSGFLGS_LARGE_CDB (0x40)
/* SCSI IO 32 Flags bits */
#define MPI_SCSIIO32_FLAGS_FORM_MASK (0x03)
#define MPI_SCSIIO32_FLAGS_FORM_SCSIID (0x00)
#define MPI_SCSIIO32_FLAGS_FORM_WWID (0x01)
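/*
 * Illustrative sketch only, not part of this commit: a SCSI IO 32 request
 * can address its target either by Bus/TargetID or by WWID, selected by the
 * FORM bits in the Flags field above.  example_set_io32_address() is an
 * assumed name.
 */
static void example_set_io32_address(MSG_SCSI_IO32_REQUEST *req, int use_wwid,
				     U8 bus, U8 target, U64 wwid)
{
	if (use_wwid) {
		req->Flags = MPI_SCSIIO32_FLAGS_FORM_WWID;
		req->DeviceAddress.WWID = wwid;
	} else {
		req->Flags = MPI_SCSIIO32_FLAGS_FORM_SCSIID;
		req->DeviceAddress.SCSIID.Bus = bus;
		req->DeviceAddress.SCSIID.TargetID = target;
	}
}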
/* SCSI IO 32 LUN fields */
#define MPI_SCSIIO32_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
#define MPI_SCSIIO32_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
#define MPI_SCSIIO32_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
#define MPI_SCSIIO32_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
#define MPI_SCSIIO32_LUN_LEVEL_1_WORD (0xFF00)
#define MPI_SCSIIO32_LUN_LEVEL_1_DWORD (0x0000FF00)
/* SCSI IO 32 Control bits */
#define MPI_SCSIIO32_CONTROL_DATADIRECTION_MASK (0x03000000)
#define MPI_SCSIIO32_CONTROL_NODATATRANSFER (0x00000000)
#define MPI_SCSIIO32_CONTROL_WRITE (0x01000000)
#define MPI_SCSIIO32_CONTROL_READ (0x02000000)
#define MPI_SCSIIO32_CONTROL_BIDIRECTIONAL (0x03000000)
#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_MASK (0xFC000000)
#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_SHIFT (26)
#define MPI_SCSIIO32_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
#define MPI_SCSIIO32_CONTROL_SIMPLEQ (0x00000000)
#define MPI_SCSIIO32_CONTROL_HEADOFQ (0x00000100)
#define MPI_SCSIIO32_CONTROL_ORDEREDQ (0x00000200)
#define MPI_SCSIIO32_CONTROL_ACAQ (0x00000400)
#define MPI_SCSIIO32_CONTROL_UNTAGGED (0x00000500)
#define MPI_SCSIIO32_CONTROL_NO_DISCONNECT (0x00000700)
#define MPI_SCSIIO32_CONTROL_TASKMANAGE_MASK (0x00FF0000)
#define MPI_SCSIIO32_CONTROL_OBSOLETE (0x00800000)
#define MPI_SCSIIO32_CONTROL_CLEAR_ACA_RSV (0x00400000)
#define MPI_SCSIIO32_CONTROL_TARGET_RESET (0x00200000)
#define MPI_SCSIIO32_CONTROL_LUN_RESET_RSV (0x00100000)
#define MPI_SCSIIO32_CONTROL_RESERVED (0x00080000)
#define MPI_SCSIIO32_CONTROL_CLR_TASK_SET_RSV (0x00040000)
#define MPI_SCSIIO32_CONTROL_ABORT_TASK_SET (0x00020000)
#define MPI_SCSIIO32_CONTROL_RESERVED2 (0x00010000)
/* SCSI IO 32 EEDPFlags */
#define MPI_SCSIIO32_EEDPFLAGS_MASK_OP (0x0007)
#define MPI_SCSIIO32_EEDPFLAGS_NOOP_OP (0x0000)
#define MPI_SCSIIO32_EEDPFLAGS_CHK_OP (0x0001)
#define MPI_SCSIIO32_EEDPFLAGS_STRIP_OP (0x0002)
#define MPI_SCSIIO32_EEDPFLAGS_CHKRM_OP (0x0003)
#define MPI_SCSIIO32_EEDPFLAGS_INSERT_OP (0x0004)
#define MPI_SCSIIO32_EEDPFLAGS_REPLACE_OP (0x0006)
#define MPI_SCSIIO32_EEDPFLAGS_CHKREGEN_OP (0x0007)
#define MPI_SCSIIO32_EEDPFLAGS_PASS_REF_TAG (0x0008)
#define MPI_SCSIIO32_EEDPFLAGS_8_9THS_MODE (0x0010)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_MASK (0x0700)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD (0x0100)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG (0x0200)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_LBATAG (0x0400)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_SHIFT (8)
#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
/* SCSIIO32 IO reply structure */
typedef struct _MSG_SCSIIO32_IO_REPLY
{
U8 Port; /* 00h */
U8 Reserved1; /* 01h */
U8 MsgLength; /* 02h */
U8 Function; /* 03h */
U8 CDBLength; /* 04h */
U8 SenseBufferLength; /* 05h */
U8 Flags; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U8 SCSIStatus; /* 0Ch */
U8 SCSIState; /* 0Dh */
U16 IOCStatus; /* 0Eh */
U32 IOCLogInfo; /* 10h */
U32 TransferCount; /* 14h */
U32 SenseCount; /* 18h */
U32 ResponseInfo; /* 1Ch */
U16 TaskTag; /* 20h */
U16 Reserved2; /* 22h */
U32 BidirectionalTransferCount; /* 24h */
} MSG_SCSIIO32_IO_REPLY, MPI_POINTER PTR_MSG_SCSIIO32_IO_REPLY,
SCSIIO32Reply_t, MPI_POINTER pSCSIIO32Reply_t;
/****************************************************************************/
/* SCSI Task Management messages */
/****************************************************************************/
@ -310,10 +503,14 @@ typedef struct _MSG_SEP_REQUEST
#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
#define MPI_SEP_REQ_SLOTSTATUS_REQ_CONSISTENCY_CHECK (0x00001000)
#define MPI_SEP_REQ_SLOTSTATUS_DISABLE (0x00002000)
#define MPI_SEP_REQ_SLOTSTATUS_REQ_RESERVED_DEVICE (0x00004000)
#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000)
#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000)
#define MPI_SEP_REQ_SLOTSTATUS_ACTIVE (0x00800000)
#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000)
@ -352,11 +549,15 @@ typedef struct _MSG_SEP_REPLY
#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
#define MPI_SEP_REPLY_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
#define MPI_SEP_REPLY_SLOTSTATUS_DISABLE (0x00002000)
#define MPI_SEP_REPLY_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000)
#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000)
#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
#define MPI_SEP_REPLY_SLOTSTATUS_ACTIVE (0x00800000)
#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000)
#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000)
#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)


@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: August 11, 2000
*
* mpi_ioc.h Version: 01.05.08
* mpi_ioc.h Version: 01.05.09
*
* Version History
* ---------------
@ -81,6 +81,8 @@
* Reply and IOC Init Request.
* 03-11-05 01.05.08 Added family code for 1068E family.
* Removed IOCFacts Reply EEDP Capability bit.
* 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
* Added Max SATA Targets to SAS Discovery Error event.
* --------------------------------------------------------------------------
*/
@ -261,7 +263,11 @@ typedef struct _MSG_IOC_FACTS_REPLY
#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
#define MPI_IOCFACTS_CAPABILITY_EEDP (0x00000040)
#define MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL (0x00000080)
#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200)
#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400)
/*****************************************************************************
@ -677,6 +683,7 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
/*****************************************************************************


@ -6,7 +6,7 @@
* Title: MPI Target mode messages and structures
* Creation Date: June 22, 2000
*
* mpi_targ.h Version: 01.05.04
* mpi_targ.h Version: 01.05.05
*
* Version History
* ---------------
@ -53,6 +53,7 @@
* 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
* 02-22-05 01.05.03 Changed a comment.
* 03-11-05 01.05.04 Removed TargetAssistExtended Request.
* 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
* --------------------------------------------------------------------------
*/
@ -370,6 +371,77 @@ typedef struct _MSG_TARGET_ERROR_REPLY
TargetErrorReply_t, MPI_POINTER pTargetErrorReply_t;
/****************************************************************************/
/* Target Assist Extended Request */
/****************************************************************************/
typedef struct _MSG_TARGET_ASSIST_EXT_REQUEST
{
U8 StatusCode; /* 00h */
U8 TargetAssistFlags; /* 01h */
U8 ChainOffset; /* 02h */
U8 Function; /* 03h */
U16 QueueTag; /* 04h */
U8 Reserved1; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U32 ReplyWord; /* 0Ch */
U8 LUN[8]; /* 10h */
U32 RelativeOffset; /* 18h */
U32 Reserved2; /* 1Ch */
U32 Reserved3; /* 20h */
U32 PrimaryReferenceTag; /* 24h */
U16 PrimaryApplicationTag; /* 28h */
U16 PrimaryApplicationTagMask; /* 2Ah */
U32 Reserved4; /* 2Ch */
U32 DataLength; /* 30h */
U32 BidirectionalDataLength; /* 34h */
U32 SecondaryReferenceTag; /* 38h */
U16 SecondaryApplicationTag; /* 3Ch */
U16 Reserved5; /* 3Eh */
U16 EEDPFlags; /* 40h */
U16 ApplicationTagTranslationMask; /* 42h */
U32 EEDPBlockSize; /* 44h */
U8 SGLOffset0; /* 48h */
U8 SGLOffset1; /* 49h */
U8 SGLOffset2; /* 4Ah */
U8 SGLOffset3; /* 4Bh */
U32 Reserved6; /* 4Ch */
SGE_IO_UNION SGL[1]; /* 50h */
} MSG_TARGET_ASSIST_EXT_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_EXT_REQUEST,
TargetAssistExtRequest_t, MPI_POINTER pTargetAssistExtRequest_t;
/* see the defines after MSG_TARGET_ASSIST_REQUEST for TargetAssistFlags */
/* defines for the MsgFlags field */
#define TARGET_ASSIST_EXT_MSGFLAGS_BIDIRECTIONAL (0x20)
#define TARGET_ASSIST_EXT_MSGFLAGS_MULTICAST (0x10)
#define TARGET_ASSIST_EXT_MSGFLAGS_SGL_OFFSET_CHAINS (0x08)
/* defines for the EEDPFlags field */
#define TARGET_ASSIST_EXT_EEDP_MASK_OP (0x0007)
#define TARGET_ASSIST_EXT_EEDP_NOOP_OP (0x0000)
#define TARGET_ASSIST_EXT_EEDP_CHK_OP (0x0001)
#define TARGET_ASSIST_EXT_EEDP_STRIP_OP (0x0002)
#define TARGET_ASSIST_EXT_EEDP_CHKRM_OP (0x0003)
#define TARGET_ASSIST_EXT_EEDP_INSERT_OP (0x0004)
#define TARGET_ASSIST_EXT_EEDP_REPLACE_OP (0x0006)
#define TARGET_ASSIST_EXT_EEDP_CHKREGEN_OP (0x0007)
#define TARGET_ASSIST_EXT_EEDP_PASS_REF_TAG (0x0008)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_MASK (0x0700)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD (0x0100)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_APPTAG (0x0200)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG (0x0400)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_SHIFT (8)
#define TARGET_ASSIST_EXT_EEDP_INC_SEC_APPTAG (0x1000)
#define TARGET_ASSIST_EXT_EEDP_INC_PRI_APPTAG (0x2000)
#define TARGET_ASSIST_EXT_EEDP_INC_SEC_REFTAG (0x4000)
#define TARGET_ASSIST_EXT_EEDP_INC_PRI_REFTAG (0x8000)
/****************************************************************************/
/* Target Status Send Request */
/****************************************************************************/


@ -218,8 +218,7 @@ pci_enable_io_access(struct pci_dev *pdev)
* (also referred to as a IO Controller or IOC).
* This routine must clear the interrupt from the adapter and does
* so by reading the reply FIFO. Multiple replies may be processed
* per single call to this routine; up to MPT_MAX_REPLIES_PER_ISR
* which is currently set to 32 in mptbase.h.
* per single call to this routine.
*
* This routine handles register-level access of the adapter but
* dispatches (calls) a protocol-specific callback routine to handle
@ -279,11 +278,11 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x\n",
ioc->name, mr, req_idx));
dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
DBG_DUMP_REPLY_FRAME(mr)
/* Check/log IOC log info
/* Check/log IOC log info
*/
ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
@ -345,7 +344,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
|| (mf < ioc->req_frames)) ) {
printk(MYIOC_s_WARN_FMT
"mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, (void *)mf, req_idx);
"mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
cb_idx = 0;
pa = 0;
freeme = 0;
@ -399,7 +398,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
* @mf: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
* Returns 1 indicating original alloc'd request frame ptr
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
@ -408,28 +407,17 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
int freereq = 1;
u8 func;
dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
if ((mf == NULL) ||
(mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n",
ioc->name, (void *)mf);
return 1;
}
if (reply == NULL) {
dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) reply!\n",
ioc->name));
return 1;
}
dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
#if defined(MPT_DEBUG_MSG_FRAME)
if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf));
DBG_DUMP_REQUEST_FRAME_HDR(mf)
}
#endif
func = reply->u.hdr.Function;
dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
ioc->name, func));
if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
@ -448,8 +436,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
* Hmmm... It seems that EventNotificationReply is an exception
* to the rule of one reply per request.
*/
if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
freereq = 0;
devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
ioc->name, pEvReply));
} else {
devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
ioc->name, pEvReply));
}
#ifdef CONFIG_PROC_FS
// LogEvent(ioc, pEvReply);
@ -491,10 +485,21 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
pCfg->status = status;
if (status == MPI_IOCSTATUS_SUCCESS) {
pCfg->hdr->PageVersion = pReply->Header.PageVersion;
pCfg->hdr->PageLength = pReply->Header.PageLength;
pCfg->hdr->PageNumber = pReply->Header.PageNumber;
pCfg->hdr->PageType = pReply->Header.PageType;
if ((pReply->Header.PageType &
MPI_CONFIG_PAGETYPE_MASK) ==
MPI_CONFIG_PAGETYPE_EXTENDED) {
pCfg->cfghdr.ehdr->ExtPageLength =
le16_to_cpu(pReply->ExtPageLength);
pCfg->cfghdr.ehdr->ExtPageType =
pReply->ExtPageType;
}
pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
/* If this is a regular header, save PageLength. */
/* LMP Do this better so not using a reserved field! */
pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
}
}
@ -705,7 +710,7 @@ mpt_device_driver_deregister(int cb_idx)
if (dd_cbfunc->remove)
dd_cbfunc->remove(ioc->pcidev);
}
MptDeviceDriverHandlers[cb_idx] = NULL;
}
@ -818,7 +823,7 @@ mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
}
#endif
mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx]));
CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
}
@ -920,7 +925,7 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
@ -935,14 +940,14 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
return -5;
dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n",
ioc->name, ii));
ioc->name, ii));
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
return -2;
}
/* Send request via doorbell handshake */
req_as_bytes = (u8 *) req;
for (ii = 0; ii < reqBytes/4; ii++) {
@ -988,9 +993,9 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
if (ioc->id == iocid) {
*iocpp =ioc;
return iocid;
}
}
}
*iocpp = NULL;
return -1;
}
@ -1032,9 +1037,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
return r;
dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n"));
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
dprintk((KERN_INFO MYNAM
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n"));
@ -1059,7 +1064,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->alloc_total = sizeof(MPT_ADAPTER);
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
ioc->pcidev = pdev;
ioc->diagPending = 0;
spin_lock_init(&ioc->diagLock);
@ -1088,7 +1093,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Find lookup slot. */
INIT_LIST_HEAD(&ioc->list);
ioc->id = mpt_ids++;
mem_phys = msize = 0;
port = psize = 0;
for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
@ -1143,7 +1148,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->prod_name = "LSIFC909";
ioc->bus_type = FC;
}
if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
ioc->prod_name = "LSIFC929";
ioc->bus_type = FC;
}
@ -1322,7 +1327,7 @@ mpt_detach(struct pci_dev *pdev)
remove_proc_entry(pname, NULL);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
remove_proc_entry(pname, NULL);
/* call per device driver remove entry point */
for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
if(MptDeviceDriverHandlers[ii] &&
@ -1330,7 +1335,7 @@ mpt_detach(struct pci_dev *pdev)
MptDeviceDriverHandlers[ii]->remove(pdev);
}
}
/* Disable interrupts! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
@ -1403,7 +1408,7 @@ mpt_resume(struct pci_dev *pdev)
u32 device_state = pdev->current_state;
int recovery_state;
int ii;
printk(MYIOC_s_INFO_FMT
"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
ioc->name, pdev, pci_name(pdev), device_state);
@ -1534,7 +1539,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
break;
}
if (ii == 5) {
dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc));
@ -1542,7 +1547,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc);
}
if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
@ -1613,7 +1618,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt) */
dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
dinitprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
ioc->alt_ioc->active = 1;
@ -1670,7 +1675,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
/* Find IM volumes
*/
if (ioc->facts.MsgVersion >= 0x0102)
if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
@ -1700,7 +1705,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
if (alt_ioc_ready && MptResetHandlers[ii]) {
dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, ii));
rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
handlers++;
@ -1733,8 +1738,8 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
dprintk((MYIOC_s_INFO_FMT "PCI device %s devfn=%x/%x,"
" searching for devfn match on %x or %x\n",
ioc->name, pci_name(pdev), pdev->devfn,
func-1, func+1));
ioc->name, pci_name(pdev), pdev->bus->number,
pdev->devfn, func-1, func+1));
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
if (!peer) {
@ -1861,36 +1866,39 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
static void
mpt_adapter_dispose(MPT_ADAPTER *ioc)
{
if (ioc != NULL) {
int sz_first, sz_last;
int sz_first, sz_last;
sz_first = ioc->alloc_total;
if (ioc == NULL)
return;
mpt_adapter_disable(ioc);
sz_first = ioc->alloc_total;
if (ioc->pci_irq != -1) {
free_irq(ioc->pci_irq, ioc);
ioc->pci_irq = -1;
}
mpt_adapter_disable(ioc);
if (ioc->memmap != NULL)
iounmap(ioc->memmap);
if (ioc->pci_irq != -1) {
free_irq(ioc->pci_irq, ioc);
ioc->pci_irq = -1;
}
if (ioc->memmap != NULL) {
iounmap(ioc->memmap);
ioc->memmap = NULL;
}
#if defined(CONFIG_MTRR) && 0
if (ioc->mtrr_reg > 0) {
mtrr_del(ioc->mtrr_reg, 0, 0);
dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
}
if (ioc->mtrr_reg > 0) {
mtrr_del(ioc->mtrr_reg, 0, 0);
dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
}
#endif
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
sz_last = ioc->alloc_total;
dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
kfree(ioc);
}
sz_last = ioc->alloc_total;
dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
kfree(ioc);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -1977,7 +1985,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
}
/* Is it already READY? */
if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
return 0;
/*
@ -1995,7 +2003,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Hmmm... Did it get left operational?
*/
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
dinitprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n",
dinitprintk((MYIOC_s_INFO_FMT "IOC operational unexpected\n",
ioc->name));
/* Check WhoInit.
@ -2004,8 +2012,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Else, fall through to KickStart case
*/
whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
dprintk((KERN_WARNING MYNAM
": whoinit 0x%x\n statefault %d force %d\n",
dinitprintk((KERN_INFO MYNAM
": whoinit 0x%x statefault %d force %d\n",
whoinit, statefault, force));
if (whoinit == MPI_WHOINIT_PCI_PEER)
return -4;
@ -2140,8 +2148,8 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
get_facts.Function = MPI_FUNCTION_IOC_FACTS;
/* Assert: All other get_facts fields are zero! */
dinitprintk((MYIOC_s_INFO_FMT
"Sending get IocFacts request req_sz=%d reply_sz=%d\n",
dinitprintk((MYIOC_s_INFO_FMT
"Sending get IocFacts request req_sz=%d reply_sz=%d\n",
ioc->name, req_sz, reply_sz));
/* No non-zero fields in the get_facts request are greater than
@ -2174,7 +2182,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
status = facts->IOCStatus & MPI_IOCSTATUS_MASK;
status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
/* CHECKME! IOCStatus, IOCLogInfo */
facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
@ -2221,7 +2229,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
if ( sz & 0x02 )
sz += 2;
facts->FWImageSize = sz;
if (!facts->RequestFrameSize) {
/* Something is wrong! */
printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
@ -2240,7 +2248,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
ioc->NBShiftFactor = shiftFactor;
dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
ioc->name, vv, shiftFactor, r));
if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
/*
* Set values for this IOC's request & reply frame sizes,
@ -2261,7 +2269,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
return r;
}
} else {
printk(MYIOC_s_ERR_FMT
printk(MYIOC_s_ERR_FMT
"Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
RequestFrameSize)/sizeof(u32)));
@ -2413,9 +2421,11 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
ioc->name, &ioc_init));
if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0)
if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
return r;
}
/* YIKES! SUPER IMPORTANT!!!
* Poll IocState until _OPERATIONAL while IOC is doing
@ -2440,7 +2450,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
state = mpt_GetIocState(ioc, 1);
count++;
}
dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
ioc->name, count));
return r;
@ -2529,7 +2539,7 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
int sz;
sz = ioc->facts.FWImageSize;
dinitprintk((KERN_WARNING MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
dinitprintk((KERN_INFO MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
pci_free_consistent(ioc->pcidev, sz,
ioc->cached_fw, ioc->cached_fw_dma);
@ -2573,9 +2583,9 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
mpt_alloc_fw_memory(ioc, sz);
dinitprintk((KERN_WARNING MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
dinitprintk((KERN_INFO MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
if (ioc->cached_fw == NULL) {
/* Major Failure.
*/
@ -2605,14 +2615,14 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma);
sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
dinitprintk((KERN_WARNING MYNAM "Sending FW Upload (req @ %p) sgeoffset=%d \n",
dinitprintk((KERN_INFO MYNAM ": Sending FW Upload (req @ %p) sgeoffset=%d \n",
prequest, sgeoffset));
DBG_DUMP_FW_REQUEST_FRAME(prequest)
ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
dinitprintk((KERN_WARNING MYNAM "FW Upload completed rc=%x \n", ii));
dinitprintk((KERN_INFO MYNAM ": FW Upload completed rc=%x \n", ii));
cmdStatus = -EFAULT;
if (ii == 0) {
@ -2627,10 +2637,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
cmdStatus = 0;
}
}
dinitprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n",
dinitprintk((MYIOC_s_INFO_FMT ": do_upload cmdStatus=%d \n",
ioc->name, cmdStatus));
if (cmdStatus) {
ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n",
@ -2761,8 +2771,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
fwSize = (pExtImage->ImageSize + 3) >> 2;
ptrFw = (u32 *)pExtImage;
ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x bytes @ %p load_addr=%x\n",
ioc->name, fwSize*4, ptrFw, load_addr));
ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
while (fwSize--) {
@ -2845,9 +2855,9 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
* 0 else
*
* Returns:
* 1 - hard reset, READY
* 0 - no reset due to History bit, READY
* -1 - no reset due to History bit but not READY
* 1 - hard reset, READY
* 0 - no reset due to History bit, READY
* -1 - no reset due to History bit but not READY
* OR reset but failed to come READY
* -2 - no reset, could not enter DIAG mode
* -3 - reset but bad FW bit
@ -2990,7 +3000,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
*
*/
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
mdelay (1);
mdelay(1);
/*
* Now hit the reset bit in the Diagnostic register
@ -3170,7 +3180,7 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
u32 state;
int cntdn, count;
drsprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
drsprintk((KERN_INFO MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
ioc->name, reset_type));
CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
@ -3374,6 +3384,9 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->reply_frames = (MPT_FRAME_HDR *) mem;
ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
dinitprintk((KERN_INFO MYNAM ": %s ReplyBuffers @ %p[%p]\n",
ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
alloc_dma += reply_sz;
mem += reply_sz;
@ -3382,7 +3395,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->req_frames = (MPT_FRAME_HDR *) mem;
ioc->req_frames_dma = alloc_dma;
dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffers @ %p[%p]\n",
dinitprintk((KERN_INFO MYNAM ": %s RequestBuffers @ %p[%p]\n",
ioc->name, mem, (void *)(ulong)alloc_dma));
ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
@ -3408,7 +3421,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->ChainBuffer = mem;
ioc->ChainBufferDMA = alloc_dma;
dinitprintk((KERN_INFO MYNAM " :%s.ChainBuffers @ %p(%p)\n",
dinitprintk((KERN_INFO MYNAM " :%s ChainBuffers @ %p(%p)\n",
ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
/* Initialize the free chain Q.
@ -3513,7 +3526,7 @@ out_fail:
*/
static int
mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
{
MPIDefaultReply_t *mptReply;
int failcnt = 0;
@ -3588,7 +3601,7 @@ mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
*/
if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
failcnt++;
dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n",
ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
@ -3747,7 +3760,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
}
dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
/*
@ -3819,7 +3832,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
@ -3863,7 +3876,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
hdr.PageLength = 0;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
@ -3930,7 +3943,7 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
@ -4012,7 +4025,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
hdr.PageLength = 0;
hdr.PageNumber = 2;
hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
@ -4102,7 +4115,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0;
header.PageNumber = 0;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4122,6 +4135,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
ioc->spi_data.minSyncFactor = MPT_ASYNC;
ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
rc = 1;
ddvprintk((MYIOC_s_INFO_FMT "Unable to read PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else {
/* Save the Port Page 0 data
*/
@ -4131,7 +4146,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
dinitprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
ddvprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
ioc->name, pPP0->Capabilities));
}
ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
@ -4140,6 +4155,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
ioc->spi_data.minSyncFactor = (u8) (data >> 8);
ddvprintk((MYIOC_s_INFO_FMT "PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else {
ioc->spi_data.maxSyncOffset = 0;
ioc->spi_data.minSyncFactor = MPT_ASYNC;
@ -4152,8 +4169,11 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
(ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
if (ioc->spi_data.minSyncFactor < MPT_ULTRA)
if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
ioc->spi_data.minSyncFactor = MPT_ULTRA;
ddvprintk((MYIOC_s_INFO_FMT "HVD or SE detected, minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
}
}
}
if (pbuf) {
@ -4168,7 +4188,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0;
header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4236,7 +4256,7 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0;
header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4245,8 +4265,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
ioc->spi_data.sdp1version = cfg.hdr->PageVersion;
ioc->spi_data.sdp1length = cfg.hdr->PageLength;
ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
header.PageVersion = 0;
header.PageLength = 0;
@ -4255,8 +4275,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
ioc->spi_data.sdp0version = cfg.hdr->PageVersion;
ioc->spi_data.sdp0length = cfg.hdr->PageLength;
ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n",
ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
@ -4298,7 +4318,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
header.PageLength = 0;
header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4394,7 +4414,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
header.PageLength = 0;
header.PageNumber = 3;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4446,7 +4466,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
header.PageLength = 0;
header.PageNumber = 4;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4498,7 +4518,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
header.PageLength = 0;
header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -4580,13 +4600,13 @@ SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
if (evnp == NULL) {
dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
devtprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
ioc->name));
return 0;
}
memset(evnp, 0, sizeof(*evnp));
dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch));
devtprintk((MYIOC_s_INFO_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
evnp->ChainOffset = 0;
@ -4610,8 +4630,10 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
EventAck_t *pAck;
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n",
ioc->name);
printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK "
"request frame for Event=%x EventContext=%x EventData=%x!\n",
ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
le32_to_cpu(evnp->Data[0]));
return -1;
}
memset(pAck, 0, sizeof(*pAck));
@ -4647,10 +4669,11 @@ int
mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
{
Config_t *pReq;
ConfigExtendedPageHeader_t *pExtHdr = NULL;
MPT_FRAME_HDR *mf;
unsigned long flags;
int ii, rc;
u32 flagsLength;
int flagsLength;
int in_isr;
/* Prevent calling wait_event() (below), if caller happens
@ -4675,16 +4698,30 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->Reserved = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG;
/* Assume page type is not extended and clear "reserved" fields. */
pReq->ExtPageLength = 0;
pReq->ExtPageType = 0;
pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++)
pReq->Reserved2[ii] = 0;
pReq->Header.PageVersion = pCfg->hdr->PageVersion;
pReq->Header.PageLength = pCfg->hdr->PageLength;
pReq->Header.PageNumber = pCfg->hdr->PageNumber;
pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
pReq->ExtPageType = pExtHdr->ExtPageType;
pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
/* Page Length must be treated as a reserved field for the extended header. */
pReq->Header.PageLength = 0;
}
pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
/* Add a SGE to the config request.
@ -4694,13 +4731,21 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
else
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
flagsLength |= pCfg->hdr->PageLength * 4;
if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
flagsLength |= pExtHdr->ExtPageLength * 4;
dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action));
}
else {
flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
}
mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
/* Append pCfg pointer to end of mf
*/
*((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
@ -4789,8 +4834,8 @@ mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->Reserved3 = 0;
pReq->NumAddressBytes = 0x01;
pReq->Reserved4 = 0;
pReq->DataLength = 0x04;
pdev = (struct pci_dev *) ioc->pcidev;
pReq->DataLength = cpu_to_le16(0x04);
pdev = ioc->pcidev;
if (pdev->devfn & 1)
pReq->DeviceAddr = 0xB2;
else
@ -5504,6 +5549,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
* If needed, send (a single) EventAck.
*/
if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
devtprintk((MYIOC_s_WARN_FMT
"EventAck required\n",ioc->name));
if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n",
ioc->name, ii));
@ -5584,7 +5631,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
case 0x00080000:
desc = "Outbound DMA Overrun";
break;
case 0x00090000:
desc = "Task Management";
break;
@ -5600,7 +5647,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
case 0x000C0000:
desc = "Untagged Table Size";
break;
}
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
@ -5692,7 +5739,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
/* This error is checked in scsi_io_done(). Skip.
/* This error is checked in scsi_io_done(). Skip.
desc = "SCSI Data Underrun";
*/
break;

View file

@ -915,7 +915,10 @@ struct scsi_cmnd;
typedef struct _x_config_parms {
struct list_head linkage; /* linked list */
struct timer_list timer; /* timer function for this request */
ConfigPageHeader_t *hdr;
union {
ConfigExtendedPageHeader_t *ehdr;
ConfigPageHeader_t *hdr;
} cfghdr;
dma_addr_t physAddr;
int wait_done; /* wait for this request */
u32 pageAddr; /* properly formatted */
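The union above is the core of this change: every call site switches from cfg.hdr to cfg.cfghdr.hdr (or cfg.cfghdr.ehdr for extended pages), and mpt_config() now inspects the page type to decide which member to use. A minimal sketch of a header-only request with the new field layout, modelled on the GetIoUnitPage2-style call sites earlier in this diff (the function name is hypothetical; error handling trimmed):

static int example_get_iounit_hdr(MPT_ADAPTER *ioc)
{
	ConfigPageHeader_t hdr;
	CONFIGPARMS cfg;

	memset(&hdr, 0, sizeof(hdr));
	memset(&cfg, 0, sizeof(cfg));
	hdr.PageNumber = 2;
	hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;

	cfg.cfghdr.hdr = &hdr;		/* standard page header; an extended page
					 * would set cfg.cfghdr.ehdr instead */
	cfg.physAddr = -1;		/* header-only request, no data buffer */
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	return mpt_config(ioc, &cfg);	/* 0 on success */
}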

View file

@ -242,7 +242,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
/* Set the command status to GOOD if IOC Status is GOOD
* OR if SCSI I/O cmd and data underrun or recovered error.
*/
iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK;
iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocStatus == MPI_IOCSTATUS_SUCCESS)
ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
@ -2324,7 +2324,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@ -2333,7 +2333,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
strncpy(karg.serial_number, " ", 24);
if (mpt_config(ioc, &cfg) == 0) {
if (cfg.hdr->PageLength > 0) {
if (cfg.cfghdr.hdr->PageLength > 0) {
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
@ -2479,7 +2479,7 @@ mptctl_hp_targetinfo(unsigned long arg)
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.timeout = 0;
@ -2527,15 +2527,15 @@ mptctl_hp_targetinfo(unsigned long arg)
hdr.PageNumber = 3;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &hdr;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
cfg.physAddr = -1;
if ((mpt_config(ioc, &cfg) == 0) && (cfg.hdr->PageLength > 0)) {
if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.hdr->PageLength * 4;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
ioc->pcidev, data_sz, &page_dma);
if (pg3_alloc) {

View file

@ -281,12 +281,12 @@ mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
chain_idx = offset / ioc->req_sz;
rc = SUCCESS;
dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n",
ioc->name, *retIndex, chainBuf));
dsgprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
ioc->name, chainBuf, ioc->ChainBuffer, offset, chain_idx));
} else {
rc = FAILED;
chain_idx = MPT_HOST_NO_CHAIN;
dfailprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
dfailprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer failed\n",
ioc->name));
}
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
@ -432,7 +432,7 @@ nextSGEset:
*/
pReq->ChainOffset = 0;
RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
dsgprintk((MYIOC_s_ERR_FMT
dsgprintk((MYIOC_s_INFO_FMT
"Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
ioc->RequestNB[req_idx] = RequestNB;
}
@ -491,11 +491,12 @@ nextSGEset:
/* NOTE: psge points to the beginning of the chain element
* in current buffer. Get a chain buffer.
*/
dsgprintk((MYIOC_s_INFO_FMT
"calling getFreeChainBuffer SCSI cmd=%02x (%p)\n",
ioc->name, pReq->CDB[0], SCpnt));
if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED)
if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
dfailprintk((MYIOC_s_INFO_FMT
"getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
ioc->name, pReq->CDB[0], SCpnt));
return FAILED;
}
/* Update the tracking arrays.
* If chainSge == NULL, update ReqToChain, else ChainToChain
@ -577,14 +578,20 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 1;
}
dmfprintk((MYIOC_s_INFO_FMT
"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
ioc->name, mf, mr, sc, req_idx));
sc->result = DID_OK << 16; /* Set default reply as OK */
pScsiReq = (SCSIIORequest_t *) mf;
pScsiReply = (SCSIIOReply_t *) mr;
if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
dmfprintk((MYIOC_s_INFO_FMT
"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
}else{
dmfprintk((MYIOC_s_INFO_FMT
"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
ioc->name, mf, mr, sc, req_idx));
}
if (pScsiReply == NULL) {
/* special context reply handling */
;
@ -658,8 +665,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/* Sufficient data transfer occurred */
sc->result = (DID_OK << 16) | scsi_status;
} else if ( xfer_cnt == 0 ) {
/* A CRC Error causes this condition; retry */
sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
/* A CRC Error causes this condition; retry */
sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
(CHECK_CONDITION << 1);
sc->sense_buffer[0] = 0x70;
sc->sense_buffer[2] = NO_SENSE;
@ -668,7 +675,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
} else {
sc->result = DID_SOFT_ERROR << 16;
}
dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->target));
dreplyprintk((KERN_NOTICE
"RESIDUAL_MISMATCH: result=%x on id=%d\n",
sc->result, sc->device->id));
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@ -796,7 +805,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 1;
}
/*
* mptscsih_flush_running_cmds - For each command found, search
* Scsi_Host instance taskQ and reply to OS.
@ -1017,7 +1025,7 @@ mptscsih_remove(struct pci_dev *pdev)
scsi_host_put(host);
mpt_detach(pdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -1072,7 +1080,7 @@ mptscsih_resume(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
mpt_resume(pdev);
if(!host)
return 0;
@ -1214,8 +1222,8 @@ mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t off
int size = 0;
if (func) {
/*
* write is not supported
/*
* write is not supported
*/
} else {
if (start)
@ -1535,17 +1543,17 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
*/
if (mptscsih_tm_pending_wait(hd) == FAILED) {
if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler abort: "
dtmprintk((KERN_INFO MYNAM ": %s: TMHandler abort: "
"Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending));
return FAILED;
} else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler target reset: "
dtmprintk((KERN_INFO MYNAM ": %s: TMHandler target reset: "
"Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending));
return FAILED;
} else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler bus reset: "
dtmprintk((KERN_INFO MYNAM ": %s: TMHandler bus reset: "
"Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending));
if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS))
@ -1631,8 +1639,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) {
dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
hd->ioc->name));
//return FAILED;
return -999;
return FAILED;
}
dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
hd->ioc->name, mf));
@ -1661,9 +1668,8 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
pScsiTm->TaskMsgContext = ctx2abort;
dtmprintk((MYIOC_s_INFO_FMT
"IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
hd->ioc->name, ctx2abort, type));
dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
hd->ioc->name, ctx2abort, type));
DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm);
@ -1902,13 +1908,13 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If we can't locate the host to reset, then we failed. */
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
"Can't locate host! (sc=%p)\n",
SCpnt ) );
return FAILED;
}
printk(KERN_WARNING MYNAM ": %s: >> Attempting host reset! (sc=%p)\n",
printk(KERN_WARNING MYNAM ": %s: Attempting host reset! (sc=%p)\n",
hd->ioc->name, SCpnt);
/* If our attempts to reset the host failed, then return a failed
@ -1924,7 +1930,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
hd->tmState = TM_STATE_NONE;
}
dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
"Status = %s\n",
(status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
@ -1951,8 +1957,8 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
if (hd->tmState == TM_STATE_NONE) {
hd->tmState = TM_STATE_IN_PROGRESS;
hd->tmPending = 1;
status = SUCCESS;
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
status = SUCCESS;
break;
}
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@ -1980,7 +1986,7 @@ mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
if(hd->tmPending == 0) {
status = SUCCESS;
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
break;
}
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@ -2318,10 +2324,10 @@ mptscsih_slave_configure(struct scsi_device *device)
if (pTarget == NULL) {
/* Driver doesn't know about this device.
* Kernel may generate a "Dummy Lun 0" which
* may become a real Lun if a
* may become a real Lun if a
* "scsi add-single-device" command is executed
* while the driver is active (hot-plug a
* device). LSI Raid controllers need
* while the driver is active (hot-plug a
* device). LSI Raid controllers need
* queue_depth set to DEV_HIGH for this reason.
*/
scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
@ -2691,7 +2697,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
* If the peripheral qualifier filter is enabled then if the target reports a 0x1
* (i.e. The target is capable of supporting the specified peripheral device type
* on this logical unit; however, the physical device is not currently connected
* to this logical unit) it will be converted to a 0x3 (i.e. The target is not
* to this logical unit) it will be converted to a 0x3 (i.e. The target is not
* capable of supporting a physical device on this logical unit). This is to work
* around a bug in the mid-layer in some distributions in which the mid-layer will
* continue to try to communicate to the LUN and eventually create a dummy LUN.
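For instance (illustrative numbers), a disconnected disk answering INQUIRY with byte 0 = 0x20 (qualifier 001b, device type 0x00) would be presented to the mid-layer as 0x60 (qualifier 011b), which stops it from retrying the absent LUN and registering a dummy device.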
@ -3194,8 +3200,8 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
ioc->name));
dfailprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
ioc->name));
return -EAGAIN;
}
@ -3289,7 +3295,7 @@ mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus)
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
dfailprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
ioc->name));
return -EAGAIN;
}
@ -3447,7 +3453,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* some type of error occurred.
*/
MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS)
if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
completionCode = MPT_SCANDV_GOOD;
else
completionCode = MPT_SCANDV_SOME_ERROR;
@ -3955,7 +3961,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
header1.PageLength = ioc->spi_data.sdp1length;
header1.PageNumber = 1;
header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &header1;
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
@ -3996,9 +4002,9 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
"offset=0 negoFlags=%x request=%x config=%x\n",
id, flags, requested, configuration));
pcfg1Data->RequestedParameters = le32_to_cpu(requested);
pcfg1Data->RequestedParameters = cpu_to_le32(requested);
pcfg1Data->Reserved = 0;
pcfg1Data->Configuration = le32_to_cpu(configuration);
pcfg1Data->Configuration = cpu_to_le32(configuration);
cfg.pageAddr = (bus<<8) | id;
mpt_config(hd->ioc, &cfg);
}
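The two assignments above previously used le32_to_cpu(); the corrected direction follows the general rule applied throughout this patch: values composed on the host and written into the little-endian config page go through cpu_to_le32(), while le32_to_cpu() is reserved for fields read back from the IOC (on little-endian machines both are no-ops, which is how the reversed calls went unnoticed). A minimal reminder (illustrative):

	pcfg1Data->RequestedParameters = cpu_to_le32(requested);	/* host -> IOC */
	val = le32_to_cpu(pPage0->NegotiatedParameters);		/* IOC  -> host */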
@ -4353,7 +4359,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* Prep cfg structure
*/
cfg.pageAddr = (bus<<8) | id;
cfg.hdr = NULL;
cfg.cfghdr.hdr = NULL;
/* Prep SDP0 header
*/
@ -4399,7 +4405,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
cfg1_dma_addr = dvbuf_dma + sz;
/* Skip this ID? Set cfg.hdr to force config page write
/* Skip this ID? Set cfg.cfghdr.hdr to force config page write
*/
{
ScsiCfgData *pspi_data = &hd->ioc->spi_data;
@ -4417,7 +4423,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
dv.cmd = MPT_SET_MAX;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.hdr = &header1;
cfg.cfghdr.hdr = &header1;
/* Save the final negotiated settings to
* SCSI device page 1.
@ -4483,7 +4489,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
dv.cmd = MPT_SET_MIN;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.hdr = &header1;
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
@ -4596,8 +4602,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
if ((pbuf1[56] & 0x02) == 0) {
pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT
"DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
ddvprintk((MYIOC_s_NOTE_FMT
"DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
ioc->name, id, pbuf1[56]));
}
}
@ -4637,7 +4643,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
u32 sdp0_info;
u32 sdp0_nego;
cfg.hdr = &header0;
cfg.cfghdr.hdr = &header0;
cfg.physAddr = cfg0_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
@ -4673,7 +4679,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
if (!firstPass)
doFallback = 1;
} else {
ddvprintk((MYIOC_s_NOTE_FMT
ddvprintk((MYIOC_s_NOTE_FMT
"DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
mptscsih_initTarget(hd,
@ -4689,8 +4695,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
} else if (rc == MPT_SCANDV_ISSUE_SENSE)
doFallback = 1; /* set fallback flag */
else if ((rc == MPT_SCANDV_DID_RESET) ||
(rc == MPT_SCANDV_SENSE) ||
else if ((rc == MPT_SCANDV_DID_RESET) ||
(rc == MPT_SCANDV_SENSE) ||
(rc == MPT_SCANDV_FALLBACK))
doFallback = 1; /* set fallback flag */
else
@ -4722,7 +4728,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
* 4) release
* 5) update nego parms to target struct
*/
cfg.hdr = &header1;
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
@ -5121,12 +5127,12 @@ target_done:
/* Set if cfg1_dma_addr contents is valid
*/
if ((cfg.hdr != NULL) && (retcode == 0)){
if ((cfg.cfghdr.hdr != NULL) && (retcode == 0)){
/* If disk, not U320, disable QAS
*/
if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT
ddvprintk((MYIOC_s_NOTE_FMT
"noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
}
@ -5137,7 +5143,7 @@ target_done:
* skip save of the final negotiated settings to
* SCSI device page 1.
*
cfg.hdr = &header1;
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
@ -5248,7 +5254,7 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
/* Update tmax values with those from Device Page 0.*/
pPage0 = (SCSIDevicePage0_t *) pPage;
if (pPage0) {
val = cpu_to_le32(pPage0->NegotiatedParameters);
val = le32_to_cpu(pPage0->NegotiatedParameters);
dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
@ -5276,12 +5282,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
dv->now.offset, &val, &configuration, dv->now.flags);
dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val);
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration);
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x request=%x configuration=%x\n",
ddvprintk(("id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x configuration=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
break;
@ -5301,9 +5307,9 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
offset, &val, &configuration, negoFlags);
dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, width, factor, offset, negoFlags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val);
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration);
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
id, width, factor, offset, val, configuration, negoFlags));
@ -5377,12 +5383,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
if (pPage1) {
mptscsih_setDevicePage1Flags (width, factor, offset, &val,
&configuration, dv->now.flags);
dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x flags=%x request=%x config=%x\n",
dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x negoFlags=%x request=%x config=%x\n",
id, width, offset, factor, dv->now.flags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val);
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration);
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",

View file

@ -162,15 +162,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u8 *mem;
int error=0;
int r;
if ((r = mpt_attach(pdev,id)) != 0)
return r;
ioc = pci_get_drvdata(pdev);
ioc->DoneCtx = mptspiDoneCtx;
ioc->TaskCtx = mptspiTaskCtx;
ioc->InternalCtx = mptspiInternalCtx;
/* Added sanity check on readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {

View file

@ -1,5 +1,11 @@
menu "SCSI device support"
config RAID_ATTRS
tristate "RAID Transport Class"
default n
---help---
Provides RAID
config SCSI
tristate "SCSI device support"
---help---

View file

@ -22,6 +22,8 @@ subdir-$(CONFIG_PCMCIA) += pcmcia
obj-$(CONFIG_SCSI) += scsi_mod.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers

View file

@ -972,7 +972,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (sizeof(struct hw_fib) -
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter

View file

@ -48,12 +48,6 @@
static struct scsi_transport_template *ahd_linux_transport_template = NULL;
/*
* Include aiclib.c as part of our
* "module dependencies are hard" work around.
*/
#include "aiclib.c"
#include <linux/init.h> /* __setup */
#include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */
@ -372,8 +366,6 @@ static int ahd_linux_run_command(struct ahd_softc*,
struct ahd_linux_device *,
struct scsi_cmnd *);
static void ahd_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahd_linux_setup_tag_info;
static aic_option_callback_t ahd_linux_setup_iocell_info;
static int aic79xx_setup(char *c);
static int ahd_linux_unit;
@ -907,6 +899,86 @@ ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
}
}
static char *
ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
void (*callback)(u_long, int, int, int32_t),
u_long callback_arg)
{
char *tok_end;
char *tok_end2;
int i;
int instance;
int targ;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
/* All options use a ':' name/arg separator */
if (*opt_arg != ':')
return (opt_arg);
opt_arg++;
instance = -1;
targ = -1;
done = FALSE;
/*
* Restore separator that may be in
* the middle of our option argument.
*/
tok_end = strchr(opt_arg, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*opt_arg) {
case '{':
if (instance == -1) {
instance = 0;
} else {
if (depth > 1) {
if (targ == -1)
targ = 0;
} else {
printf("Malformed Option %s\n",
opt_name);
done = TRUE;
}
}
opt_arg++;
break;
case '}':
if (targ != -1)
targ = -1;
else if (instance != -1)
instance = -1;
opt_arg++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (targ >= 0)
targ++;
else if (instance >= 0)
instance++;
opt_arg++;
break;
case '\0':
done = TRUE;
break;
default:
tok_end = end;
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(opt_arg, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end))
tok_end = tok_end2;
}
callback(callback_arg, instance, targ,
simple_strtol(opt_arg, NULL, 0));
opt_arg = tok_end;
break;
}
}
return (opt_arg);
}
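The parser above consumes the brace syntax of the aic79xx option string: at depth 2 (as used for tag_info) the outer braces select a controller instance and the inner braces a per-target value. A hypothetical boot parameter accepted by this code (values purely illustrative):

	aic79xx=tag_info:{{16,16,16,16},{32,32}}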
/*
* Handle Linux boot parameters. This routine allows for assigning a value
* to a parameter with a ':' between the parameter and the value.
@ -964,18 +1036,18 @@ aic79xx_setup(char *s)
if (strncmp(p, "global_tag_depth", n) == 0) {
ahd_linux_setup_tag_info_global(p + n);
} else if (strncmp(p, "tag_info", n) == 0) {
s = aic_parse_brace_option("tag_info", p + n, end,
s = ahd_parse_brace_option("tag_info", p + n, end,
2, ahd_linux_setup_tag_info, 0);
} else if (strncmp(p, "slewrate", n) == 0) {
s = aic_parse_brace_option("slewrate",
s = ahd_parse_brace_option("slewrate",
p + n, end, 1, ahd_linux_setup_iocell_info,
AIC79XX_SLEWRATE_INDEX);
} else if (strncmp(p, "precomp", n) == 0) {
s = aic_parse_brace_option("precomp",
s = ahd_parse_brace_option("precomp",
p + n, end, 1, ahd_linux_setup_iocell_info,
AIC79XX_PRECOMP_INDEX);
} else if (strncmp(p, "amplitude", n) == 0) {
s = aic_parse_brace_option("amplitude",
s = ahd_parse_brace_option("amplitude",
p + n, end, 1, ahd_linux_setup_iocell_info,
AIC79XX_AMPLITUDE_INDEX);
} else if (p[n] == ':') {

View file

@ -53,6 +53,49 @@ static void ahd_dump_device_state(struct info_str *info,
static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
static struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
{ 0x08, 625 }, /* FAST-160 */
{ 0x09, 1250 }, /* FAST-80 */
{ 0x0a, 2500 }, /* FAST-40 40MHz */
{ 0x0b, 3030 }, /* FAST-40 33MHz */
{ 0x0c, 5000 } /* FAST-20 */
};
/*
* Return the frequency in kHz corresponding to the given
* sync period factor.
*/
static u_int
ahd_calc_syncsrate(u_int period_factor)
{
int i;
int num_syncrates;
num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
/* See if the period is in the "exception" table */
for (i = 0; i < num_syncrates; i++) {
if (period_factor == scsi_syncrates[i].period_factor) {
/* Period in kHz */
return (100000000 / scsi_syncrates[i].period);
}
}
/*
* Wasn't in the table, so use the standard
* 4 times conversion.
*/
return (10000000 / (period_factor * 4 * 10));
}
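A couple of worked values for the conversion above (illustrative; the identical ahc_calc_syncsrate copy added further down in this diff behaves the same way):

	freq = ahd_calc_syncsrate(0x09);	/* in table: 100000000 / 1250 == 80000 kHz (FAST-80) */
	freq = ahd_calc_syncsrate(0x0c);	/* in table: 100000000 / 5000 == 20000 kHz (FAST-20) */
	freq = ahd_calc_syncsrate(0x19);	/* not in table: 10000000 / (0x19 * 40) == 10000 kHz */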
static void
copy_mem_info(struct info_str *info, char *data, int len)
{
@ -109,7 +152,7 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
speed = 3300;
freq = 0;
if (tinfo->offset != 0) {
freq = aic_calc_syncsrate(tinfo->period);
freq = ahd_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);

View file

@ -125,12 +125,6 @@
static struct scsi_transport_template *ahc_linux_transport_template = NULL;
/*
* Include aiclib.c as part of our
* "module dependencies are hard" work around.
*/
#include "aiclib.c"
#include <linux/init.h> /* __setup */
#include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */
@ -391,7 +385,6 @@ static int ahc_linux_run_command(struct ahc_softc*,
struct ahc_linux_device *,
struct scsi_cmnd *);
static void ahc_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahc_linux_setup_tag_info;
static int aic7xxx_setup(char *s);
static int ahc_linux_unit;
@ -920,6 +913,86 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
}
}
static char *
ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
void (*callback)(u_long, int, int, int32_t),
u_long callback_arg)
{
char *tok_end;
char *tok_end2;
int i;
int instance;
int targ;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
/* All options use a ':' name/arg separator */
if (*opt_arg != ':')
return (opt_arg);
opt_arg++;
instance = -1;
targ = -1;
done = FALSE;
/*
* Restore separator that may be in
* the middle of our option argument.
*/
tok_end = strchr(opt_arg, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*opt_arg) {
case '{':
if (instance == -1) {
instance = 0;
} else {
if (depth > 1) {
if (targ == -1)
targ = 0;
} else {
printf("Malformed Option %s\n",
opt_name);
done = TRUE;
}
}
opt_arg++;
break;
case '}':
if (targ != -1)
targ = -1;
else if (instance != -1)
instance = -1;
opt_arg++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (targ >= 0)
targ++;
else if (instance >= 0)
instance++;
opt_arg++;
break;
case '\0':
done = TRUE;
break;
default:
tok_end = end;
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(opt_arg, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end))
tok_end = tok_end2;
}
callback(callback_arg, instance, targ,
simple_strtol(opt_arg, NULL, 0));
opt_arg = tok_end;
break;
}
}
return (opt_arg);
}
/*
* Handle Linux boot parameters. This routine allows for assigning a value
* to a parameter with a ':' between the parameter and the value.
@ -974,7 +1047,7 @@ aic7xxx_setup(char *s)
if (strncmp(p, "global_tag_depth", n) == 0) {
ahc_linux_setup_tag_info_global(p + n);
} else if (strncmp(p, "tag_info", n) == 0) {
s = aic_parse_brace_option("tag_info", p + n, end,
s = ahc_parse_brace_option("tag_info", p + n, end,
2, ahc_linux_setup_tag_info, 0);
} else if (p[n] == ':') {
*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);

View file

@ -54,6 +54,49 @@ static void ahc_dump_device_state(struct info_str *info,
static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
static struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
{ 0x08, 625 }, /* FAST-160 */
{ 0x09, 1250 }, /* FAST-80 */
{ 0x0a, 2500 }, /* FAST-40 40MHz */
{ 0x0b, 3030 }, /* FAST-40 33MHz */
{ 0x0c, 5000 } /* FAST-20 */
};
/*
* Return the frequency in kHz corresponding to the given
* sync period factor.
*/
static u_int
ahc_calc_syncsrate(u_int period_factor)
{
int i;
int num_syncrates;
num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
/* See if the period is in the "exception" table */
for (i = 0; i < num_syncrates; i++) {
if (period_factor == scsi_syncrates[i].period_factor) {
/* Period in kHz */
return (100000000 / scsi_syncrates[i].period);
}
}
/*
* Wasn't in the table, so use the standard
* 4 times conversion.
*/
return (10000000 / (period_factor * 4 * 10));
}
static void
copy_mem_info(struct info_str *info, char *data, int len)
{
@ -106,7 +149,7 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
speed = 3300;
freq = 0;
if (tinfo->offset != 0) {
freq = aic_calc_syncsrate(tinfo->period);
freq = ahc_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);

View file

@ -32,124 +32,3 @@
#include "aiclib.h"
/*
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
static struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
{ 0x08, 625 }, /* FAST-160 */
{ 0x09, 1250 }, /* FAST-80 */
{ 0x0a, 2500 }, /* FAST-40 40MHz */
{ 0x0b, 3030 }, /* FAST-40 33MHz */
{ 0x0c, 5000 } /* FAST-20 */
};
/*
* Return the frequency in kHz corresponding to the given
* sync period factor.
*/
u_int
aic_calc_syncsrate(u_int period_factor)
{
int i;
int num_syncrates;
num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
/* See if the period is in the "exception" table */
for (i = 0; i < num_syncrates; i++) {
if (period_factor == scsi_syncrates[i].period_factor) {
/* Period in kHz */
return (100000000 / scsi_syncrates[i].period);
}
}
/*
* Wasn't in the table, so use the standard
* 4 times conversion.
*/
return (10000000 / (period_factor * 4 * 10));
}
char *
aic_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
aic_option_callback_t *callback, u_long callback_arg)
{
char *tok_end;
char *tok_end2;
int i;
int instance;
int targ;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
/* All options use a ':' name/arg separator */
if (*opt_arg != ':')
return (opt_arg);
opt_arg++;
instance = -1;
targ = -1;
done = FALSE;
/*
* Restore separator that may be in
* the middle of our option argument.
*/
tok_end = strchr(opt_arg, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*opt_arg) {
case '{':
if (instance == -1) {
instance = 0;
} else {
if (depth > 1) {
if (targ == -1)
targ = 0;
} else {
printf("Malformed Option %s\n",
opt_name);
done = TRUE;
}
}
opt_arg++;
break;
case '}':
if (targ != -1)
targ = -1;
else if (instance != -1)
instance = -1;
opt_arg++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (targ >= 0)
targ++;
else if (instance >= 0)
instance++;
opt_arg++;
break;
case '\0':
done = TRUE;
break;
default:
tok_end = end;
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(opt_arg, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end))
tok_end = tok_end2;
}
callback(callback_arg, instance, targ,
simple_strtol(opt_arg, NULL, 0));
opt_arg = tok_end;
break;
}
}
return (opt_arg);
}

View file

@ -141,28 +141,6 @@ aic_sector_div(sector_t capacity, int heads, int sectors)
return (int)capacity;
}
/**************************** Module Library Hack *****************************/
/*
* What we'd like to do is have a single "scsi library" module that both the
* aic7xxx and aic79xx drivers could load and depend on. A cursory examination
* of implementing module dependencies in Linux (handling the install and
* initrd cases) does not look promising. For now, we just duplicate this
* code in both drivers using a simple symbol renaming scheme that hides this
* hack from the drivers.
*/
#define AIC_LIB_ENTRY_CONCAT(x, prefix) prefix ## x
#define AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix)
#define AIC_LIB_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX)
#define aic_calc_syncsrate AIC_LIB_ENTRY(_calc_syncrate)
u_int aic_calc_syncsrate(u_int /*period_factor*/);
typedef void aic_option_callback_t(u_long, int, int, int32_t);
char * aic_parse_brace_option(char *opt_name, char *opt_arg,
char *end, int depth,
aic_option_callback_t *, u_long);
static __inline uint32_t
scsi_4btoul(uint8_t *bytes)
{

View file

@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
@ -225,15 +226,8 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
if (shost->ehandler) {
DECLARE_COMPLETION(sem);
shost->eh_notify = &sem;
shost->eh_kill = 1;
up(shost->eh_wait);
wait_for_completion(&sem);
shost->eh_notify = NULL;
}
if (shost->ehandler)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
@ -263,7 +257,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
int gfp_mask = GFP_KERNEL, rval;
DECLARE_COMPLETION(complete);
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
@ -369,12 +362,12 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
shost->host_no);
shost->eh_notify = &complete;
rval = kernel_thread(scsi_error_handler, shost, 0);
if (rval < 0)
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
if (IS_ERR(shost->ehandler)) {
rval = PTR_ERR(shost->ehandler);
goto fail_destroy_freelist;
wait_for_completion(&complete);
shost->eh_notify = NULL;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
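This hunk replaces the hand-rolled kernel_thread()/completion handshake with the kthread API: kthread_run() returns the task_struct (or an ERR_PTR on failure), and kthread_stop() in scsi_host_dev_release() wakes the thread and waits for it to exit. A generic sketch of that lifecycle, not the actual scsi_error_handler (which lives in scsi_error.c and must poll kthread_should_stop() for this to work):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static int example_eh_thread(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);		/* placeholder: wait for error work */
	}
	return 0;				/* becomes kthread_stop()'s return value */
}

	/* creation, mirroring the hunk above */
	struct task_struct *tsk;

	tsk = kthread_run(example_eh_thread, NULL, "example_eh");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* teardown: wakes the thread and blocks until it returns */
	kthread_stop(tsk);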

View file

@ -87,7 +87,7 @@ static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = 50;
#define IBMVSCSI_VERSION "1.5.6"
#define IBMVSCSI_VERSION "1.5.7"
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
sizeof(*evt->xfer_iu) * i;
evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata;
evt->ext_list = NULL;
evt->ext_list_token = 0;
}
return 0;
@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
struct ibmvscsi_host_data *hostdata)
{
int i, in_use = 0;
for (i = 0; i < pool->size; ++i)
for (i = 0; i < pool->size; ++i) {
if (atomic_read(&pool->events[i].free) != 1)
++in_use;
if (pool->events[i].ext_list) {
dma_free_coherent(hostdata->dev,
SG_ALL * sizeof(struct memory_descriptor),
pool->events[i].ext_list,
pool->events[i].ext_list_token);
}
}
if (in_use)
printk(KERN_WARNING
"ibmvscsi: releasing event pool with %d "
@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
} else {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_out_count = numbuf;
srp_cmd->data_out_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
} else {
srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_in_count = numbuf;
srp_cmd->data_in_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
}
}
}
static void unmap_sg_list(int num_entries,
struct device *dev,
struct memory_descriptor *md)
{
int i;
for (i = 0; i < num_entries; ++i) {
dma_unmap_single(dev,
md[i].virtual_address,
md[i].length, DMA_BIDIRECTIONAL);
}
}
/**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
* @dev: device for which the memory is mapped
*
*/
static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
static void unmap_cmd_data(struct srp_cmd *cmd,
struct srp_event_struct *evt_struct,
struct device *dev)
{
int i;
if ((cmd->data_out_format == SRP_NO_BUFFER) &&
(cmd->data_in_format == SRP_NO_BUFFER))
return;
@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
(struct indirect_descriptor *)cmd->additional_data;
int num_mapped = indirect->head.length /
sizeof(indirect->list[0]);
for (i = 0; i < num_mapped; ++i) {
struct memory_descriptor *data = &indirect->list[i];
dma_unmap_single(dev,
data->virtual_address,
data->length, DMA_BIDIRECTIONAL);
if (num_mapped <= MAX_INDIRECT_BUFS) {
unmap_sg_list(num_mapped, dev, &indirect->list[0]);
return;
}
unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
}
}
static int map_sg_list(int num_entries,
struct scatterlist *sg,
struct memory_descriptor *md)
{
int i;
u64 total_length = 0;
for (i = 0; i < num_entries; ++i) {
struct memory_descriptor *descr = md + i;
struct scatterlist *sg_entry = &sg[i];
descr->virtual_address = sg_dma_address(sg_entry);
descr->length = sg_dma_len(sg_entry);
descr->memory_handle = 0;
total_length += sg_dma_len(sg_entry);
}
return total_length;
}
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
* @cmd: Scsi_Cmnd with the scatterlist
@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
* Returns 1 on success.
*/
static int map_sg_data(struct scsi_cmnd *cmd,
struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev)
{
int i, sg_mapped;
int sg_mapped;
u64 total_length = 0;
struct scatterlist *sg = cmd->request_buffer;
struct memory_descriptor *data =
@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
return 1;
}
if (sg_mapped > MAX_INDIRECT_BUFS) {
if (sg_mapped > SG_ALL) {
printk(KERN_ERR
"ibmvscsi: More than %d mapped sg entries, got %d\n",
MAX_INDIRECT_BUFS, sg_mapped);
SG_ALL, sg_mapped);
return 0;
}
indirect->head.virtual_address = 0;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
indirect->head.memory_handle = 0;
for (i = 0; i < sg_mapped; ++i) {
struct memory_descriptor *descr = &indirect->list[i];
struct scatterlist *sg_entry = &sg[i];
descr->virtual_address = sg_dma_address(sg_entry);
descr->length = sg_dma_len(sg_entry);
descr->memory_handle = 0;
total_length += sg_dma_len(sg_entry);
}
indirect->total_length = total_length;
return 1;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
indirect->total_length = total_length;
return 1;
}
/* get indirect table */
if (!evt_struct->ext_list) {
evt_struct->ext_list =(struct memory_descriptor*)
dma_alloc_coherent(dev,
SG_ALL * sizeof(struct memory_descriptor),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
printk(KERN_ERR
"ibmvscsi: Can't allocate memory for indirect table\n");
return 0;
}
}
total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
indirect->total_length = total_length;
indirect->head.virtual_address = evt_struct->ext_list_token;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
memcpy(indirect->list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
return 1;
}
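Summing up the rework above: scatterlists with at most MAX_INDIRECT_BUFS entries keep their descriptors inline in srp_cmd->additional_data, while larger ones (up to the new SG_ALL limit) are written into the per-event ext_list allocated with dma_alloc_coherent() on first use, and the adapter is pointed at that table through ext_list_token. A condensed view of the branch (illustrative, mirroring the code above):

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		/* fits inline; head.virtual_address is fixed up later
		 * in queuecommand to point back into the IU */
		indirect->total_length =
			map_sg_list(sg_mapped, sg, &indirect->list[0]);
	} else {
		/* spill to the external coherent table and hand the
		 * adapter its DMA token */
		indirect->total_length =
			map_sg_list(sg_mapped, sg, evt_struct->ext_list);
		indirect->head.virtual_address = evt_struct->ext_list_token;
		indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	}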
/**
@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
* Returns 1 on success.
*/
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev)
{
switch (cmd->sc_data_direction) {
@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
if (!cmd->request_buffer)
return 1;
if (cmd->use_sg)
return map_sg_data(cmd, srp_cmd, dev);
return map_sg_data(cmd, evt_struct, srp_cmd, dev);
return map_single_data(cmd, srp_cmd, dev);
}
@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
printk(KERN_WARNING
"ibmvscsi: Warning, request_limit exceeded\n");
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
hostdata->dev);
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
return 0;
send_error:
unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
if ((cmnd = evt_struct->cmnd) != NULL) {
cmnd->result = DID_ERROR << 16;
@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
rsp->sense_and_response_data,
rsp->sense_data_list_length);
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev);
if (rsp->doover)
@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
{
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct indirect_descriptor *indirect;
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
u16 lun = lun_from_dev(cmnd->device);
@ -591,6 +660,19 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
if (!evt_struct)
return SCSI_MLQUEUE_HOST_BUSY;
/* Set up the actual SRP IU */
srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, sizeof(*srp_cmd));
srp_cmd->type = SRP_CMD_TYPE;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
@ -599,24 +681,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
/* Set up the actual SRP IU */
srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, sizeof(*srp_cmd));
srp_cmd->type = SRP_CMD_TYPE;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
/* Fix up dma address of the buffer itself */
if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
(srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
struct indirect_descriptor *indirect =
(struct indirect_descriptor *)srp_cmd->additional_data;
indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
(srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
(indirect->head.virtual_address == 0)) {
indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
offsetof(struct srp_cmd, additional_data) +
offsetof(struct indirect_descriptor, list);
@ -931,7 +1000,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
found_evt->hostdata->dev);
free_event_struct(&found_evt->hostdata->pool, found_evt);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
atomic_inc(&hostdata->request_limit);
@ -1023,7 +1093,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
if (tmp_evt->cmnd)
tmp_evt->cmnd->result = (DID_RESET << 16);
list_del(&tmp_evt->list);
unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev);
unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
tmp_evt->hostdata->dev);
free_event_struct(&tmp_evt->hostdata->pool,
tmp_evt);
atomic_inc(&hostdata->request_limit);
@ -1052,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
if (tmp_evt->cmnd) {
tmp_evt->cmnd->result = (DID_ERROR << 16);
unmap_cmd_data(&tmp_evt->iu.srp.cmd,
tmp_evt,
tmp_evt->hostdata->dev);
if (tmp_evt->cmnd_done)
tmp_evt->cmnd_done(tmp_evt->cmnd);
@ -1356,7 +1428,7 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = 16,
.can_queue = 1, /* Updated after SRP_LOGIN */
.this_id = -1,
.sg_tablesize = MAX_INDIRECT_BUFS,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvscsi_attrs,
};

View file

@ -68,6 +68,8 @@ struct srp_event_struct {
void (*cmnd_done) (struct scsi_cmnd *);
struct completion comp;
union viosrp_iu *sync_srp;
struct memory_descriptor *ext_list;
dma_addr_t ext_list_token;
};
/* a pool of event structs for use */


@ -211,6 +211,138 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
}
/* Scsi_Host attributes. */
static ssize_t
qla2x00_drvr_version_show(struct class_device *cdev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}
static ssize_t
qla2x00_fw_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
char fw_str[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.fw_version_str(ha, fw_str));
}
static ssize_t
qla2x00_serial_num_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
uint32_t sn;
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
sn % 100000);
}
static ssize_t
qla2x00_isp_name_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->brd_info->isp_name);
}
static ssize_t
qla2x00_isp_id_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
}
static ssize_t
qla2x00_model_name_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
}
static ssize_t
qla2x00_model_desc_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->model_desc ? ha->model_desc: "");
}
static ssize_t
qla2x00_pci_info_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.pci_info_str(ha, pci_info));
}
static ssize_t
qla2x00_state_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
int len = 0;
if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
atomic_read(&ha->loop_state) == LOOP_DEAD)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&ha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = snprintf(buf, PAGE_SIZE, "Link Up - ");
switch (ha->current_topology) {
case ISP_CFG_NL:
len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
case ISP_CFG_FL:
len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
break;
case ISP_CFG_N:
len += snprintf(buf + len, PAGE_SIZE-len,
"N_Port to N_Port\n");
break;
case ISP_CFG_F:
len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
break;
default:
len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
}
}
return len;
}
static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
NULL);
static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static CLASS_DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static CLASS_DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static CLASS_DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static CLASS_DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static CLASS_DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static CLASS_DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static CLASS_DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
struct class_device_attribute *qla2x00_host_attrs[] = {
&class_device_attr_driver_version,
&class_device_attr_fw_version,
&class_device_attr_serial_num,
&class_device_attr_isp_name,
&class_device_attr_isp_id,
&class_device_attr_model_name,
&class_device_attr_model_desc,
&class_device_attr_pci_info,
&class_device_attr_state,
NULL,
};
/* Host attributes. */
static void
@ -304,10 +436,13 @@ struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.get_host_port_id = qla2x00_get_host_port_id,
.show_host_port_id = 1,
.dd_fcrport_size = sizeof(struct fc_port *),
.show_rport_supported_classes = 1,
.get_starget_node_name = qla2x00_get_starget_node_name,
.show_starget_node_name = 1,
@ -329,4 +464,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
fc_host_port_name(ha->host) =
be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
}


@ -81,6 +81,7 @@
#define DEBUG2_3_11(x) do {x;} while (0);
#define DEBUG2_9_10(x) do {x;} while (0);
#define DEBUG2_11(x) do {x;} while (0);
#define DEBUG2_13(x) do {x;} while (0);
#else
#define DEBUG2(x) do {} while (0);
#endif
@ -169,8 +170,14 @@
#if defined(QL_DEBUG_LEVEL_13)
#define DEBUG13(x) do {x;} while (0)
#if !defined(DEBUG2_13)
#define DEBUG2_13(x) do {x;} while(0)
#endif
#else
#define DEBUG13(x) do {} while (0)
#if !defined(QL_DEBUG_LEVEL_2)
#define DEBUG2_13(x) do {} while(0)
#endif
#endif
#if defined(QL_DEBUG_LEVEL_14)


@ -214,6 +214,7 @@
* valid range of an N-PORT id is 0 through 0x7ef.
*/
#define NPH_LAST_HANDLE 0x7ef
#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@ -630,6 +631,7 @@ typedef struct {
#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */
#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */
#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */
#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */
#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */
#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
@ -913,7 +915,7 @@ typedef struct {
* MSB BIT 1 =
* MSB BIT 2 =
* MSB BIT 3 =
* MSB BIT 4 =
* MSB BIT 4 = LED mode
* MSB BIT 5 = enable 50 ohm termination
* MSB BIT 6 = Data Rate (2300 only)
* MSB BIT 7 = Data Rate (2300 only)
@ -1035,7 +1037,7 @@ typedef struct {
* MSB BIT 1 =
* MSB BIT 2 =
* MSB BIT 3 =
* MSB BIT 4 =
* MSB BIT 4 = LED mode
* MSB BIT 5 = enable 50 ohm termination
* MSB BIT 6 = Data Rate (2300 only)
* MSB BIT 7 = Data Rate (2300 only)
@ -1131,10 +1133,7 @@ typedef struct {
uint8_t link_down_timeout;
uint8_t adapter_id_0[4];
uint8_t adapter_id_1[4];
uint8_t adapter_id_2[4];
uint8_t adapter_id_3[4];
uint8_t adapter_id[16];
uint8_t alt1_boot_node_name[WWN_SIZE];
uint16_t alt1_boot_lun_number;
@ -1673,6 +1672,7 @@ typedef struct fc_port {
uint8_t cur_path; /* current path id */
struct fc_rport *rport;
u32 supported_classes;
} fc_port_t;
/*
@ -1727,6 +1727,8 @@ typedef struct fc_port {
#define CT_REJECT_RESPONSE 0x8001
#define CT_ACCEPT_RESPONSE 0x8002
#define CT_REASON_CANNOT_PERFORM 0x09
#define CT_EXPL_ALREADY_REGISTERED 0x10
#define NS_N_PORT_TYPE 0x01
#define NS_NL_PORT_TYPE 0x02
@ -1768,6 +1770,100 @@ typedef struct fc_port {
#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
#define RSNN_NN_RSP_SIZE 16
/*
* HBA attribute types.
*/
#define FDMI_HBA_ATTR_COUNT 9
#define FDMI_HBA_NODE_NAME 1
#define FDMI_HBA_MANUFACTURER 2
#define FDMI_HBA_SERIAL_NUMBER 3
#define FDMI_HBA_MODEL 4
#define FDMI_HBA_MODEL_DESCRIPTION 5
#define FDMI_HBA_HARDWARE_VERSION 6
#define FDMI_HBA_DRIVER_VERSION 7
#define FDMI_HBA_OPTION_ROM_VERSION 8
#define FDMI_HBA_FIRMWARE_VERSION 9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
struct ct_fdmi_hba_attr {
uint16_t type;
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[32];
uint8_t serial_num[8];
uint8_t model[16];
uint8_t model_desc[80];
uint8_t hw_version[16];
uint8_t driver_version[32];
uint8_t orom_version[16];
uint8_t fw_version[16];
uint8_t os_version[128];
uint8_t max_ct_len[4];
} a;
};
struct ct_fdmi_hba_attributes {
uint32_t count;
struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
};
/*
* Port attribute types.
*/
#define FDMI_PORT_ATTR_COUNT 5
#define FDMI_PORT_FC4_TYPES 1
#define FDMI_PORT_SUPPORT_SPEED 2
#define FDMI_PORT_CURRENT_SPEED 3
#define FDMI_PORT_MAX_FRAME_SIZE 4
#define FDMI_PORT_OS_DEVICE_NAME 5
#define FDMI_PORT_HOST_NAME 6
struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
union {
uint8_t fc4_types[32];
uint32_t sup_speed;
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
} a;
};
/*
* Port Attribute Block.
*/
struct ct_fdmi_port_attributes {
uint32_t count;
struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
};
/* FDMI definitions. */
#define GRHL_CMD 0x100
#define GHAT_CMD 0x101
#define GRPL_CMD 0x102
#define GPAT_CMD 0x110
#define RHBA_CMD 0x200
#define RHBA_RSP_SIZE 16
#define RHAT_CMD 0x201
#define RPRT_CMD 0x210
#define RPA_CMD 0x211
#define RPA_RSP_SIZE 16
#define DHBA_CMD 0x300
#define DHBA_REQ_SIZE (16 + 8)
#define DHBA_RSP_SIZE 16
#define DHAT_CMD 0x301
#define DPRT_CMD 0x310
#define DPA_CMD 0x311
/* CT command header -- request/response common fields */
struct ct_cmd_hdr {
uint8_t revision;
@ -1825,6 +1921,43 @@ struct ct_sns_req {
uint8_t name_len;
uint8_t sym_node_name[255];
} rsnn_nn;
struct {
uint8_t hba_indentifier[8];
} ghat;
struct {
uint8_t hba_identifier[8];
uint32_t entry_count;
uint8_t port_name[8];
struct ct_fdmi_hba_attributes attrs;
} rhba;
struct {
uint8_t hba_identifier[8];
struct ct_fdmi_hba_attributes attrs;
} rhat;
struct {
uint8_t port_name[8];
struct ct_fdmi_port_attributes attrs;
} rpa;
struct {
uint8_t port_name[8];
} dhba;
struct {
uint8_t port_name[8];
} dhat;
struct {
uint8_t port_name[8];
} dprt;
struct {
uint8_t port_name[8];
} dpa;
} req;
};
@ -1882,6 +2015,12 @@ struct ct_sns_rsp {
struct {
uint8_t fc4_types[32];
} gft_id;
struct {
uint32_t entry_count;
uint8_t port_name[8];
struct ct_fdmi_hba_attributes attrs;
} ghat;
} rsp;
};
@ -2032,6 +2171,8 @@ struct isp_operations {
uint16_t (*calc_req_entries) (uint16_t);
void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
uint32_t);
uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
@ -2111,6 +2252,7 @@ typedef struct scsi_qla_host {
#define IOCTL_ERROR_RECOVERY 23
#define LOOP_RESET_NEEDED 24
#define BEACON_BLINK_NEEDED 25
#define REGISTER_FDMI_NEEDED 26
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@ -2204,6 +2346,7 @@ typedef struct scsi_qla_host {
int port_down_retry_count;
uint8_t mbx_count;
uint16_t last_loop_id;
uint16_t mgmt_svr_loop_id;
uint32_t login_retry_count;
@ -2318,6 +2461,7 @@ typedef struct scsi_qla_host {
uint8_t model_number[16+1];
#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
char *model_desc;
uint8_t adapter_id[16+1];
uint8_t *node_name;
uint8_t *port_name;
@ -2377,6 +2521,7 @@ typedef struct scsi_qla_host {
#define QLA_SUSPENDED 0x106
#define QLA_BUSY 0x107
#define QLA_RSCNS_HANDLED 0x108
#define QLA_ALREADY_REGISTERED 0x109
/*
* Stat info for all adapters


@ -79,6 +79,7 @@ extern int ql2xplogiabsentdevice;
extern int ql2xenablezio;
extern int ql2xintrdelaytimer;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
@ -146,9 +147,6 @@ extern int
qla2x00_abort_target(fc_port_t *);
#endif
extern int
qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
uint8_t *, uint16_t *);
@ -215,6 +213,9 @@ qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
extern int
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
extern int
qla2x00_stop_firmware(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@ -269,6 +270,9 @@ extern int qla2x00_rft_id(scsi_qla_host_t *);
extern int qla2x00_rff_id(scsi_qla_host_t *);
extern int qla2x00_rnn_id(scsi_qla_host_t *);
extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_rscn.c source file.
@ -289,6 +293,8 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_attr.c source file.
*/
struct class_device_attribute;
extern struct class_device_attribute *qla2x00_host_attrs[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);


@ -1099,3 +1099,567 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
return (rval);
}
/**
* qla2x00_mgmt_svr_login() - Login to fabric Management Service.
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
{
int ret;
uint16_t mb[MAILBOX_REGISTER_COUNT];
ret = QLA_SUCCESS;
if (ha->flags.management_server_logged_in)
return ret;
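/* Log in to the well-known fabric management server address FFFFFAh
 * (passed below as domain 0xff, area 0xff, al_pa 0xfa). */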
ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
__func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]));
ret = QLA_FUNCTION_FAILED;
} else
ha->flags.management_server_logged_in = 1;
return ret;
}
/**
* qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
* @ha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
* Returns a pointer to the @ha's ms_iocb.
*/
void *
qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
uint32_t rsp_size)
{
ms_iocb_entry_t *ms_pkt;
ms_pkt = ha->ms_iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
ms_pkt->entry_type = MS_IOCB_TYPE;
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = __constant_cpu_to_le16(59);
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(req_size);
ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
return ms_pkt;
}
/**
* qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
* @ha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
* Returns a pointer to the @ha's ms_iocb.
*/
void *
qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
uint32_t rsp_size)
{
struct ct_entry_24xx *ct_pkt;
ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
ct_pkt->timeout = __constant_cpu_to_le16(59);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
return ct_pkt;
}
static inline ms_iocb_entry_t *
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
{
ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
} else {
ms_pkt->req_bytecount = cpu_to_le32(req_size);
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
}
return ms_pkt;
}
/**
* qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
* @ct_req: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
* Returns a pointer to the initialized @ct_req.
*/
static inline struct ct_sns_req *
qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
uint16_t rsp_size)
{
memset(ct_req, 0, sizeof(struct ct_sns_pkt));
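/* CT_IU preamble: GS type 0xFA selects the management service and
 * subtype 0x10 selects FDMI; max_rsp_size is expressed in 4-byte
 * words and excludes the 16-byte CT header. */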
ct_req->header.revision = 0x01;
ct_req->header.gs_type = 0xFA;
ct_req->header.gs_subtype = 0x10;
ct_req->command = cpu_to_be16(cmd);
ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
return ct_req;
}
/**
* qla2x00_fdmi_rhba() - Register HBA attributes with the fabric (FDMI RHBA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
{
int rval, alen;
uint32_t size, sn;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
uint8_t *entries;
struct ct_fdmi_hba_attr *eiter;
/* Issue RHBA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
RHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
size = 2 * WWN_SIZE + 4 + 4;
/* Attributes */
ct_req->req.rhba.attrs.count =
__constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
entries = ct_req->req.rhba.hba_identifier;
/* Nodename. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
__func__, ha->host_no,
eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
eiter->a.node_name[6], eiter->a.node_name[7]));
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
strcpy(eiter->a.manufacturer, "QLogic Corporation");
alen = strlen(eiter->a.manufacturer);
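/* Round the attribute length up to the next 4-byte boundary; an
 * already aligned length still gains a full pad word (e.g. 18 -> 20,
 * 16 -> 20). The same padding is applied to the attributes below. */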
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
eiter->a.manufacturer));
/* Serial number. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
alen = strlen(eiter->a.serial_num);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
eiter->a.serial_num));
/* Model name. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
strcpy(eiter->a.model, ha->model_number);
alen = strlen(eiter->a.model);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
eiter->a.model));
/* Model description. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
if (ha->model_desc)
strncpy(eiter->a.model_desc, ha->model_desc, 80);
alen = strlen(eiter->a.model_desc);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
eiter->a.model_desc));
/* Hardware version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
strcpy(eiter->a.hw_version, ha->adapter_id);
alen = strlen(eiter->a.hw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
eiter->a.hw_version));
/* Driver version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
strcpy(eiter->a.driver_version, qla2x00_version_str);
alen = strlen(eiter->a.driver_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
eiter->a.driver_version));
/* Option ROM version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
strcpy(eiter->a.orom_version, "0.00");
alen = strlen(eiter->a.orom_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
eiter->a.orom_version));
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
alen = strlen(eiter->a.fw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
eiter->a.fw_version));
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(ha, size + 16);
DEBUG13(printk("%s(%ld): RHBA identifier="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
ha->host_no, ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
ct_req->req.rhba.hba_identifier[4],
ct_req->req.rhba.hba_identifier[5],
ct_req->req.rhba.hba_identifier[6],
ct_req->req.rhba.hba_identifier[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
__func__, ha->host_no));
rval = QLA_ALREADY_REGISTERED;
}
} else {
DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_dhba() - Deregister HBA attributes from the fabric (FDMI DHBA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
{
int rval;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
/* Issue DHBA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, DHBA_CMD,
DHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
DEBUG13(printk("%s(%ld): DHBA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_rpa() - Register port attributes with the fabric (FDMI RPA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
{
int rval, alen;
uint32_t size, max_frame_size;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
uint8_t *entries;
struct ct_fdmi_port_attr *eiter;
struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
/* Issue RPA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
RPA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
size = WWN_SIZE + 4;
/* Attributes */
ct_req->req.rpa.attrs.count =
__constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
entries = ct_req->req.rpa.port_name;
/* FC4 types. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
eiter->len = __constant_cpu_to_be16(4 + 32);
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
if (IS_QLA25XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(4);
else if (IS_QLA24XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(8);
else if (IS_QLA23XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(2);
else
eiter->a.sup_speed = __constant_cpu_to_be32(1);
size += 4 + 4;
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
eiter->a.sup_speed));
/* Current speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
case 0:
eiter->a.cur_speed = __constant_cpu_to_be32(1);
break;
case 1:
eiter->a.cur_speed = __constant_cpu_to_be32(2);
break;
case 3:
eiter->a.cur_speed = __constant_cpu_to_be32(8);
break;
case 4:
eiter->a.cur_speed = __constant_cpu_to_be32(4);
break;
}
size += 4 + 4;
DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
eiter->a.cur_speed));
/* Max frame size. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
max_frame_size = IS_QLA24XX(ha) || IS_QLA25XX(ha) ?
(uint32_t) icb24->frame_payload_size:
(uint32_t) ha->init_cb->frame_payload_size;
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
eiter->a.max_frame_size));
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
sprintf(eiter->a.os_dev_name, "/proc/scsi/qla2xxx/%ld", ha->host_no);
alen = strlen(eiter->a.os_dev_name);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
eiter->a.os_dev_name));
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(ha, size + 16);
DEBUG13(printk("%s(%ld): RPA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
ha->host_no, ct_req->req.rpa.port_name[0],
ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
ct_req->req.rpa.port_name[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_register() - Perform FDMI registration with the fabric.
* @ha: HA context
*
* Returns 0 on success.
*/
int
qla2x00_fdmi_register(scsi_qla_host_t *ha)
{
int rval;
rval = qla2x00_mgmt_svr_login(ha);
if (rval)
return rval;
rval = qla2x00_fdmi_rhba(ha);
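/* If the fabric reports the HBA as already registered, deregister it
 * (DHBA) and retry the RHBA before registering the port attributes. */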
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
rval = qla2x00_fdmi_dhba(ha);
if (rval)
return rval;
rval = qla2x00_fdmi_rhba(ha);
if (rval)
return rval;
}
rval = qla2x00_fdmi_rpa(ha);
return rval;
}


@ -88,6 +88,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->mbx_flags = 0;
ha->isp_abort_cnt = 0;
ha->beacon_blink_led = 0;
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
rval = ha->isp_ops.pci_config(ha);
@ -1563,7 +1564,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
ha->flags.enable_led_scheme = ((nv->efi_parameters & BIT_3) ? 1 : 0);
ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
ha->operating_mode =
(icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@ -1697,6 +1698,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
atomic_set(&fcport->state, FCS_UNCONFIGURED);
fcport->flags = FCF_RLC_SUPPORT;
fcport->supported_classes = FC_COS_UNSPECIFIED;
return (fcport);
}
@ -1898,7 +1900,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
continue;
/* Bypass if not same domain and area of adapter. */
if (area != ha->d_id.b.area || domain != ha->d_id.b.domain)
if (area && domain &&
(area != ha->d_id.b.area || domain != ha->d_id.b.domain))
continue;
/* Bypass invalid local loop ID. */
@ -2075,6 +2078,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
return;
}
rport->dd_data = fcport;
rport->supported_classes = fcport->supported_classes;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (fcport->port_type == FCT_INITIATOR)
@ -2130,6 +2134,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
return (QLA_SUCCESS);
}
do {
/* FDMI support. */
if (ql2xfdmienable &&
test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
qla2x00_fdmi_register(ha);
/* Ensure we are logged into the SNS. */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
loop_id = NPH_SNS;
@ -2392,6 +2401,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
if (new_fcport->d_id.b24 == ha->d_id.b24)
continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
(ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
ISP_CFG_FL)
continue;
/* Bypass reserved domain fields. */
if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
continue;
@ -2794,6 +2809,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
}
}
if (mb[10] & BIT_0)
fcport->supported_classes |= FC_COS_CLASS2;
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {


@ -810,12 +810,8 @@ qla24xx_start_scsi(srb_t *sp)
ha->req_q_cnt = ha->request_q_length -
(ha->req_ring_index - cnt);
}
if (ha->req_q_cnt < (req_cnt + 2)) {
if (cmd->use_sg)
pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
if (ha->req_q_cnt < (req_cnt + 2))
goto queuing_error;
}
/* Build command packet. */
ha->current_outstanding_cmd = handle;


@ -451,6 +451,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = 0;
if (ql2xfdmienable)
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
/* Update AEN queue. */
qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);


@ -19,6 +19,7 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <scsi/scsi_transport_fc.h>
static void
qla2x00_mbx_sem_timeout(unsigned long data)
@ -251,7 +252,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
} else {
mb0 = RD_MAILBOX_REG(ha, reg->isp, 0);
mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
@ -982,58 +983,6 @@ qla2x00_abort_target(fc_port_t *fcport)
}
#endif
/*
* qla2x00_target_reset
* Issue target reset mailbox command.
*
* Input:
* ha = adapter block pointer.
* TARGET_QUEUE_LOCK must be released.
* ADAPTER_STATE_LOCK must be released.
*
* Returns:
* qla2x00 local function return status code.
*
* Context:
* Kernel context.
*/
int
qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
if (atomic_read(&fcport->state) != FCS_ONLINE)
return 0;
mcp->mb[0] = MBC_TARGET_RESET;
if (HAS_EXTENDED_IDS(ha))
mcp->mb[1] = fcport->loop_id;
else
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = ha->loop_reset_delay;
mcp->out_mb = MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_target_reset(%ld): failed=%x.\n",
ha->host_no, rval);)
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_target_reset(%ld): done.\n",
ha->host_no);)
}
return rval;
}
/*
* qla2x00_get_adapter_id
* Get adapter ID and topology.
@ -1326,6 +1275,10 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
fcport->port_type = FCT_INITIATOR;
else
fcport->port_type = FCT_TARGET;
/* Passback COS information. */
fcport->supported_classes = (pd->options & BIT_4) ?
FC_COS_CLASS2: FC_COS_CLASS3;
}
gpd_error_out:
@ -1661,6 +1614,13 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mb[1] |= BIT_1;
} else
mb[1] = BIT_0;
/* Passback COS information. */
mb[10] = 0;
if (lg->io_parameter[7] || lg->io_parameter[8])
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@ -1723,6 +1683,8 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mb[2] = mcp->mb[2];
mb[6] = mcp->mb[6];
mb[7] = mcp->mb[7];
/* COS retrieved from Get-Port-Database mailbox command. */
mb[10] = 0;
}
if (rval != QLA_SUCCESS) {
@ -2465,3 +2427,32 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
return rval;
}
int
qla2x00_stop_firmware(scsi_qla_host_t *ha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
ha->host_no, rval));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
return rval;
}


@ -79,7 +79,7 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
"Specify an alternate value for the NVRAM login retry count.");
int ql2xfwloadbin;
int ql2xfwloadbin=1;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Load ISP2xxx firmware image via hotplug.");
@ -88,6 +88,12 @@ static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
int ql2xfdmienable;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registratons "
"Default is 0 - no FDMI. 1 - perfom FDMI.");
/*
* SCSI host template entry points
*/
@ -105,6 +111,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static int qla2x00_loop_reset(scsi_qla_host_t *ha);
static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
static struct scsi_host_template qla2x00_driver_template = {
.module = THIS_MODULE,
.name = "qla2xxx",
@ -119,6 +128,8 @@ static struct scsi_host_template qla2x00_driver_template = {
.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@ -129,6 +140,7 @@ static struct scsi_host_template qla2x00_driver_template = {
* which equates to 0x800000 sectors.
*/
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
};
static struct scsi_host_template qla24xx_driver_template = {
@ -145,12 +157,15 @@ static struct scsi_host_template qla24xx_driver_template = {
.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@ -487,14 +502,13 @@ qc24_fail_command:
static int
qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD HZ
#define ABORT_WAIT_ITER ((10 * HZ) / (ABORT_POLLING_PERIOD))
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
int ret = QLA_SUCCESS;
while (CMD_SP(cmd)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(ABORT_POLLING_PERIOD);
msleep(ABORT_POLLING_PERIOD);
if (--wait_iter)
break;
@ -1016,7 +1030,7 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
if (fcport->port_type != FCT_TARGET)
continue;
status = qla2x00_target_reset(ha, fcport);
status = qla2x00_device_reset(ha, fcport);
if (status != QLA_SUCCESS)
break;
}
@ -1103,6 +1117,28 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
sdev->hostdata = NULL;
}
static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
if (tag_type)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag_type = 0;
return tag_type;
}
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@ -1113,36 +1149,23 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
static void
qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
{
/* Assume 32bit DMA address */
/* Assume a 32bit DMA mask. */
ha->flags.enable_64bit_addressing = 0;
/*
* Given the two variants pci_set_dma_mask(), allow the compiler to
* assist in setting the proper dma mask.
*/
if (sizeof(dma_addr_t) > 4) {
if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
if (pci_set_consistent_dma_mask(ha->pdev,
DMA_64BIT_MASK)) {
qla_printk(KERN_DEBUG, ha,
"Failed to set 64 bit PCI consistent mask; "
"using 32 bit.\n");
pci_set_consistent_dma_mask(ha->pdev,
DMA_32BIT_MASK);
}
} else {
qla_printk(KERN_DEBUG, ha,
"Failed to set 64 bit PCI DMA mask, falling back "
"to 32 bit MASK.\n");
pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
return;
}
} else {
pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
static int
@ -1316,6 +1339,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->prev_topology = 0;
ha->ports = MAX_BUSES;
ha->init_cb_size = sizeof(init_cb_t);
ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
/* Assign ISP specific operations. */
ha->isp_ops.pci_config = qla2100_pci_config;
@ -1338,6 +1362,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
ha->isp_ops.fw_dump = qla2100_fw_dump;
@ -1375,6 +1400,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct init_cb_24xx);
ha->mgmt_svr_loop_id = 10;
ha->isp_ops.pci_config = qla24xx_pci_config;
ha->isp_ops.reset_chip = qla24xx_reset_chip;
ha->isp_ops.chip_diag = qla24xx_chip_diag;
@ -1395,6 +1421,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->isp_ops.fabric_login = qla24xx_login_fabric;
ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
ha->isp_ops.fw_dump = qla24xx_fw_dump;
@ -1558,8 +1585,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
return 0;
probe_failed:
fc_remove_host(ha->host);
qla2x00_free_device(ha);
scsi_host_put(host);
@ -1601,10 +1626,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
qla2x00_cancel_io_descriptors(ha);
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops.disable_intrs(ha);
/* Disable timer */
if (ha->timer_active)
qla2x00_stop_timer(ha);
@ -1624,8 +1645,14 @@ qla2x00_free_device(scsi_qla_host_t *ha)
}
}
qla2x00_mem_free(ha);
/* Stop currently executing firmware. */
qla2x00_stop_firmware(ha);
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops.disable_intrs(ha);
qla2x00_mem_free(ha);
ha->flags.online = 0;
@ -1934,7 +1961,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
{
struct list_head *fcpl, *fcptemp;
fc_port_t *fcport;
unsigned long wtime;/* max wait time if mbx cmd is busy. */
unsigned int wtime;/* max wait time if mbx cmd is busy. */
if (ha == NULL) {
/* error */
@ -1943,11 +1970,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
}
/* Make sure all other threads are stopped. */
wtime = 60 * HZ;
while (ha->dpc_wait && wtime) {
set_current_state(TASK_INTERRUPTIBLE);
wtime = schedule_timeout(wtime);
}
wtime = 60 * 1000;
while (ha->dpc_wait && wtime)
wtime = msleep_interruptible(wtime);
/* free ioctl memory */
qla2x00_free_ioctl_mem(ha);
@ -2478,15 +2503,15 @@ qla2x00_timer(scsi_qla_host_t *ha)
int
qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
{
const unsigned int step = HZ/10;
const unsigned int step = 100; /* msecs */
unsigned int iterations = jiffies_to_msecs(timeout)/100;
do {
if (!down_trylock(sema))
return 0;
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(step))
if (msleep_interruptible(step))
break;
} while ((timeout -= step) > 0);
} while (--iterations >= 0);
return -ETIMEDOUT;
}


@ -468,21 +468,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
uint32_t i;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Dword reads to flash. */
for (i = 0; i < dwords; i++, faddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
flash_data_to_access_addr(faddr)));
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return dwptr;
}
@ -532,10 +523,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
ret = QLA_SUCCESS;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__,
ha->host_no, man_id, flash_id));
@ -599,10 +586,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return ret;
}
@ -630,11 +613,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
{
uint32_t i;
uint32_t *dwptr;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
@ -642,10 +620,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
nvram_data_to_access_addr(naddr)));
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return buf;
}
@ -690,10 +664,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@ -728,9 +698,5 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return ret;
}


@ -19,9 +19,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.01.00b5-k"
#define QLA2XXX_VERSION "8.01.00-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 5
#define QLA_DRIVER_BETA_VER 0

drivers/scsi/raid_class.c (new file, 250 lines)

@ -0,0 +1,250 @@
/*
* RAID Attributes
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/raid_class.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define RAID_NUM_ATTRS 3
struct raid_internal {
struct raid_template r;
struct raid_function_template *f;
/* The actual attributes */
struct class_device_attribute private_attrs[RAID_NUM_ATTRS];
/* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c */
struct class_device_attribute *attrs[RAID_NUM_ATTRS + 1];
};
struct raid_component {
struct list_head node;
struct device *dev;
int num;
};
#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
#define tc_to_raid_internal(tcont) ({ \
struct raid_template *r = \
container_of(tcont, struct raid_template, raid_attrs); \
to_raid_internal(r); \
})
#define ac_to_raid_internal(acont) ({ \
struct transport_container *tc = \
container_of(acont, struct transport_container, ac); \
tc_to_raid_internal(tc); \
})
#define class_device_to_raid_internal(cdev) ({ \
struct attribute_container *ac = \
attribute_container_classdev_to_container(cdev); \
ac_to_raid_internal(ac); \
})
static int raid_match(struct attribute_container *cont, struct device *dev)
{
/* We have to look for every subsystem that could house
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
if (scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
return 0;
return i->f->is_raid(dev);
}
/* FIXME: look at other subsystems too */
return 0;
}
static int raid_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct raid_data *rd;
BUG_ON(class_get_devdata(cdev));
rd = kmalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
memset(rd, 0, sizeof(*rd));
INIT_LIST_HEAD(&rd->component_list);
class_set_devdata(cdev, rd);
return 0;
}
static int raid_remove(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct raid_data *rd = class_get_devdata(cdev);
struct raid_component *rc, *next;
class_set_devdata(cdev, NULL);
list_for_each_entry_safe(rc, next, &rd->component_list, node) {
char buf[40];
snprintf(buf, sizeof(buf), "component-%d", rc->num);
list_del(&rc->node);
sysfs_remove_link(&cdev->kobj, buf);
kfree(rc);
}
kfree(class_get_devdata(cdev));
return 0;
}
static DECLARE_TRANSPORT_CLASS(raid_class,
"raid_devices",
raid_setup,
raid_remove,
NULL);
static struct {
enum raid_state value;
char *name;
} raid_states[] = {
{ RAID_ACTIVE, "active" },
{ RAID_DEGRADED, "degraded" },
{ RAID_RESYNCING, "resyncing" },
{ RAID_OFFLINE, "offline" },
};
static const char *raid_state_name(enum raid_state state)
{
int i;
char *name = NULL;
for (i = 0; i < sizeof(raid_states)/sizeof(raid_states[0]); i++) {
if (raid_states[i].value == state) {
name = raid_states[i].name;
break;
}
}
return name;
}
#define raid_attr_show_internal(attr, fmt, var, code) \
static ssize_t raid_show_##attr(struct class_device *cdev, char *buf) \
{ \
struct raid_data *rd = class_get_devdata(cdev); \
code \
return snprintf(buf, 20, #fmt "\n", var); \
}
#define raid_attr_ro_states(attr, states, code) \
raid_attr_show_internal(attr, %s, name, \
const char *name; \
code \
name = raid_##states##_name(rd->attr); \
) \
static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define raid_attr_ro_internal(attr, code) \
raid_attr_show_internal(attr, %d, rd->attr, code) \
static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define ATTR_CODE(attr) \
struct raid_internal *i = class_device_to_raid_internal(cdev); \
if (i->f->get_##attr) \
i->f->get_##attr(cdev->dev);
#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
raid_attr_ro(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state(state);
void raid_component_add(struct raid_template *r,struct device *raid_dev,
struct device *component_dev)
{
struct class_device *cdev =
attribute_container_find_class_device(&r->raid_attrs.ac,
raid_dev);
struct raid_component *rc;
struct raid_data *rd = class_get_devdata(cdev);
char buf[40];
rc = kmalloc(sizeof(*rc), GFP_KERNEL);
if (!rc)
return;
INIT_LIST_HEAD(&rc->node);
rc->dev = component_dev;
rc->num = rd->component_count++;
snprintf(buf, sizeof(buf), "component-%d", rc->num);
list_add_tail(&rc->node, &rd->component_list);
sysfs_create_link(&cdev->kobj, &component_dev->kobj, buf);
}
EXPORT_SYMBOL(raid_component_add);
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{
struct raid_internal *i = kmalloc(sizeof(struct raid_internal),
GFP_KERNEL);
int count = 0;
if (unlikely(!i))
return NULL;
memset(i, 0, sizeof(*i));
i->f = ft;
i->r.raid_attrs.ac.class = &raid_class.class;
i->r.raid_attrs.ac.match = raid_match;
i->r.raid_attrs.ac.attrs = &i->attrs[0];
attribute_container_register(&i->r.raid_attrs.ac);
i->attrs[count++] = &class_device_attr_level;
i->attrs[count++] = &class_device_attr_resync;
i->attrs[count++] = &class_device_attr_state;
i->attrs[count] = NULL;
BUG_ON(count > RAID_NUM_ATTRS);
return &i->r;
}
EXPORT_SYMBOL(raid_class_attach);
void
raid_class_release(struct raid_template *r)
{
struct raid_internal *i = to_raid_internal(r);
attribute_container_unregister(&i->r.raid_attrs.ac);
kfree(i);
}
EXPORT_SYMBOL(raid_class_release);
static __init int raid_init(void)
{
return transport_class_register(&raid_class);
}
static __exit void raid_exit(void)
{
transport_class_unregister(&raid_class);
}
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("RAID device class");
MODULE_LICENSE("GPL");
module_init(raid_init);
module_exit(raid_exit);
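For orientation, here is a minimal, hypothetical sketch of how a low-level driver might consume the RAID class API added in this file (raid_class_attach, raid_component_add, raid_class_release). All myraid_* names and the placeholder checks are invented for illustration; a real driver would derive the callbacks from its own device state.

/* Hypothetical consumer of the RAID class API -- illustrative only. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/raid_class.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template myraid_sht;	/* assumed to exist elsewhere */
static struct raid_template *myraid_raid_template;

/* Called via raid_match(); return non-zero only for SCSI devices that
 * really are RAID volumes belonging to this driver. */
static int myraid_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == 0;		/* placeholder test */
}

/* Refresh the cached resync/state values before sysfs shows them; a
 * real driver would query its firmware here. */
static void myraid_get_resync(struct device *dev)
{
}

static void myraid_get_state(struct device *dev)
{
}

static struct raid_function_template myraid_raid_functions = {
	.cookie		= &myraid_sht,	/* matched against sdev->host->hostt */
	.is_raid	= myraid_is_raid,
	.get_resync	= myraid_get_resync,
	.get_state	= myraid_get_state,
};

static int __init myraid_init(void)
{
	myraid_raid_template = raid_class_attach(&myraid_raid_functions);
	if (!myraid_raid_template)
		return -ENOMEM;
	/* As member disks are discovered, the driver would call
	 *	raid_component_add(myraid_raid_template,
	 *			   &volume_sdev->sdev_gendev,
	 *			   &member_sdev->sdev_gendev);
	 * which creates the component-%d symlinks under the class device. */
	return 0;
}

static void __exit myraid_exit(void)
{
	raid_class_release(myraid_raid_template);
}

module_init(myraid_init);
module_exit(myraid_exit);
MODULE_LICENSE("GPL");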


@ -192,6 +192,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */


@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
@ -115,7 +116,6 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
add_timer(&scmd->eh_timeout);
}
EXPORT_SYMBOL(scsi_add_timer);
/**
* scsi_delete_timer - Delete/cancel timer for a given function.
@ -143,7 +143,6 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
return rtn;
}
EXPORT_SYMBOL(scsi_delete_timer);
/**
* scsi_times_out - Timeout function for normal scsi commands.
@ -776,9 +775,11 @@ retry_tur:
__FUNCTION__, scmd, rtn));
if (rtn == SUCCESS)
return 0;
else if (rtn == NEEDS_RETRY)
else if (rtn == NEEDS_RETRY) {
if (retry_cnt--)
goto retry_tur;
return 0;
}
return 1;
}
@ -1583,16 +1584,8 @@ int scsi_error_handler(void *data)
int rtn;
DECLARE_MUTEX_LOCKED(sem);
/*
* Flush resources
*/
daemonize("scsi_eh_%d", shost->host_no);
current->flags |= PF_NOFREEZE;
shost->eh_wait = &sem;
shost->ehandler = current;
/*
* Wake up the thread that created us.
@ -1600,8 +1593,6 @@ int scsi_error_handler(void *data)
SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
" scsi_eh_%d\n",shost->host_no));
complete(shost->eh_notify);
while (1) {
/*
* If we get a signal, it means we are supposed to go
@ -1622,7 +1613,7 @@ int scsi_error_handler(void *data)
* semaphores isn't unreasonable.
*/
down_interruptible(&sem);
if (shost->eh_kill)
if (kthread_should_stop())
break;
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
@ -1661,22 +1652,6 @@ int scsi_error_handler(void *data)
* Make sure that nobody tries to wake us up again.
*/
shost->eh_wait = NULL;
/*
* Knock this down too. From this point on, the host is flying
* without a pilot. If this is because the module is being unloaded,
* that's fine. If the user sent a signal to this thing, we are
* potentially in real danger.
*/
shost->eh_active = 0;
shost->ehandler = NULL;
/*
* If anyone is waiting for us to exit (i.e. someone trying to unload
* a driver), then wake up that process to let them know we are on
* the way out the door.
*/
complete_and_exit(shost->eh_notify, 0);
return 0;
}


@ -30,20 +30,20 @@
#define MAX_BUF PAGE_SIZE
/*
* If we are told to probe a host, we will return 0 if the host is not
* present, 1 if the host is present, and will return an identifying
* string at *arg, if arg is non null, filling to the length stored at
* (int *) arg
/**
* ioctl_probe -- return host identification
* @host: host to identify
* @buffer: userspace buffer for identification
*
* Return an identifying string at @buffer, if @buffer is non-NULL, filling
* to the length stored at * (int *) @buffer.
*/
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
unsigned int len, slen;
const char *string;
int temp = host->hostt->present;
if (temp && buffer) {
if (buffer) {
if (get_user(len, (unsigned int __user *) buffer))
return -EFAULT;
@ -59,7 +59,7 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
return -EFAULT;
}
}
return temp;
return 1;
}
/*


@ -63,6 +63,9 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
/* scsi_error.c */
extern void scsi_add_timer(struct scsi_cmnd *, int,
void (*)(struct scsi_cmnd *));
extern int scsi_delete_timer(struct scsi_cmnd *);
extern void scsi_times_out(struct scsi_cmnd *cmd);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);

View file

@ -972,7 +972,7 @@ static void
sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
{
unsigned char cmd[10];
unsigned long spintime_value = 0;
unsigned long spintime_expire = 0;
int retries, spintime;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
@ -1049,12 +1049,27 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES);
spintime_value = jiffies;
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
}
spintime = 1;
/* Wait 1 second for next try */
msleep(1000);
printk(".");
/*
* Wait for USB flash devices with slow firmware.
* Yes, this sense key/ASC combination shouldn't
* occur here. It's characteristic of these devices.
*/
} else if (sense_valid &&
sshdr.sense_key == UNIT_ATTENTION &&
sshdr.asc == 0x28) {
if (!spintime) {
spintime_expire = jiffies + 5 * HZ;
spintime = 1;
}
/* Wait 1 second for next try */
msleep(1000);
} else {
/* we don't understand the sense code, so it's
* probably pointless to loop */
@ -1066,8 +1081,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
break;
}
} while (spintime &&
time_after(spintime_value + 100 * HZ, jiffies));
} while (spintime && time_before_eq(jiffies, spintime_expire));
if (spintime) {
if (scsi_status_is_good(the_result))
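
In sd_spinup_disk() the hunks above swap the wrap-prone time_after(spintime_value + 100 * HZ, jiffies) test for an explicit spintime_expire deadline checked with time_before_eq(), and add a shorter five-second deadline for USB flash devices that keep reporting UNIT ATTENTION with ASC 0x28 while their firmware starts up. A minimal sketch of the jiffies-deadline idiom used here (example_wait_for_ready and probe_ready are illustrative, not from sd.c):

#include <linux/jiffies.h>
#include <linux/delay.h>

static void example_wait_for_ready(int (*probe_ready)(void))
{
	unsigned long expire = jiffies + 100 * HZ;	/* ~100 s from now */

	do {
		if (probe_ready())	/* e.g. TEST UNIT READY succeeded */
			return;
		msleep(1000);		/* one second between attempts */
	} while (time_before_eq(jiffies, expire));	/* wrap-safe compare */
}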

View file

@ -61,7 +61,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20050328";
static char *sg_version_date = "20050901";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
@ -1794,12 +1794,12 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
unsigned long uaddr, size_t count, int rw,
unsigned long max_pfn)
{
unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
int res, i, j;
unsigned int nr_pages;
struct page **pages;
nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
/* User attempted Overflow! */
if ((uaddr + count) < uaddr)
return -EINVAL;
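
The sg.c hunk above (and the identical st.c change in the next file) recomputes the number of pages to pin from page-aligned start/end indices instead of the old ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT expression. The two forms are arithmetically equivalent (~PAGE_MASK is PAGE_SIZE - 1), but end - start states the rounding directly. A worked example with 4 KiB pages and made-up numbers:

#include <asm/page.h>	/* PAGE_SIZE, PAGE_SHIFT, PAGE_MASK */

/* uaddr = 0x10000ffc, count = 8: the buffer straddles a page boundary
 *   end      = (0x10000ffc + 8 + 0xfff) >> 12 = 0x10002
 *   start    =  0x10000ffc              >> 12 = 0x10000
 *   nr_pages = end - start                    = 2
 * old form, same result: (0xffc + 8 + 0xfff) >> 12 = 2
 */
static inline unsigned int example_nr_pages(unsigned long uaddr, size_t count)
{
	unsigned long end   = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;

	return end - start;
}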

View file

@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
static char *verstr = "20050802";
static char *verstr = "20050830";
#include <linux/module.h>
@ -4440,12 +4440,12 @@ static int st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pag
static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
unsigned long uaddr, size_t count, int rw)
{
unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
int res, i, j;
unsigned int nr_pages;
struct page **pages;
nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
/* User attempted Overflow! */
if ((uaddr + count) < uaddr)
return -EINVAL;

View file

@ -11,12 +11,12 @@
#include <linux/device.h>
#include <linux/list.h>
#include <linux/klist.h>
#include <linux/spinlock.h>
struct attribute_container {
struct list_head node;
struct list_head containers;
spinlock_t containers_lock;
struct klist containers;
struct class *class;
struct class_device_attribute **attrs;
int (*match)(struct attribute_container *, struct device *);
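
This header change keeps the per-container classdev list on a klist instead of a bare list_head guarded by containers_lock; a klist carries its own internal lock and reference-managing node callbacks, so the separate spinlock is no longer needed at this level. A short sketch of walking a klist with the iterator API, using a purely hypothetical element type (example_item):

#include <linux/klist.h>
#include <linux/kernel.h>	/* container_of() */

/* hypothetical element; real users embed the klist_node in their own struct */
struct example_item {
	struct klist_node node;
	int payload;
};

static void example_walk(struct klist *list)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(list, &iter);
	while ((n = klist_next(&iter))) {
		struct example_item *item =
			container_of(n, struct example_item, node);
		/* use item->payload; the node's get() hold keeps it alive */
	}
	klist_iter_exit(&iter);
}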

View file

@ -0,0 +1,59 @@
/*
*/
#include <linux/transport_class.h>
struct raid_template {
struct transport_container raid_attrs;
};
struct raid_function_template {
void *cookie;
int (*is_raid)(struct device *);
void (*get_resync)(struct device *);
void (*get_state)(struct device *);
};
enum raid_state {
RAID_ACTIVE = 1,
RAID_DEGRADED,
RAID_RESYNCING,
RAID_OFFLINE,
};
struct raid_data {
struct list_head component_list;
int component_count;
int level;
enum raid_state state;
int resync;
};
#define DEFINE_RAID_ATTRIBUTE(type, attr) \
static inline void \
raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \
struct class_device *cdev = \
attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
struct raid_data *rd; \
BUG_ON(!cdev); \
rd = class_get_devdata(cdev); \
rd->attr = value; \
} \
static inline type \
raid_get_##attr(struct raid_template *r, struct device *dev) { \
struct class_device *cdev = \
attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
struct raid_data *rd; \
BUG_ON(!cdev); \
rd = class_get_devdata(cdev); \
return rd->attr; \
}
DEFINE_RAID_ATTRIBUTE(int, level)
DEFINE_RAID_ATTRIBUTE(int, resync)
DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
void raid_component_add(struct raid_template *, struct device *,
struct device *);
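
raid_class.h is a new header in this commit: raid_function_template is what a RAID-capable driver registers, raid_data holds the per-device state, and DEFINE_RAID_ATTRIBUTE expands to a typed raid_get_*/raid_set_* pair that looks that state up through the attribute container attached to the device. A hypothetical caller showing the generated helpers (example_report, ex_template and the chosen values are illustrative only):

#include <linux/device.h>
#include <linux/raid_class.h>

static void example_report(struct raid_template *ex_template,
			   struct device *dev)
{
	/* setters store into the raid_data hung off the device's classdev */
	raid_set_level(ex_template, dev, 5);
	raid_set_resync(ex_template, dev, 50);
	raid_set_state(ex_template, dev, RAID_RESYNCING);

	/* getters read the same fields back */
	if (raid_get_state(ex_template, dev) == RAID_DEGRADED)
		dev_warn(dev, "array degraded at level %d\n",
			 raid_get_level(ex_template, dev));
}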

View file

@ -35,9 +35,6 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
}
extern void scsi_add_timer(struct scsi_cmnd *, int,
void (*)(struct scsi_cmnd *));
extern int scsi_delete_timer(struct scsi_cmnd *);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);

View file

@ -467,12 +467,10 @@ struct Scsi_Host {
struct task_struct * ehandler; /* Error recovery thread. */
struct semaphore * eh_wait; /* The error recovery thread waits
on this. */
struct completion * eh_notify; /* wait for eh to begin or end */
struct semaphore * eh_action; /* Wait for specific actions on the
host. */
unsigned int eh_active:1; /* Indicates the eh thread is awake and active if
this is true. */
unsigned int eh_kill:1; /* set when killing the eh thread */
wait_queue_head_t host_wait;
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
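
With eh_notify, eh_active and eh_kill removed from struct Scsi_Host, the ehandler task_struct pointer is all the teardown path needs: stopping the error handler presumably reduces to a kthread_stop() call along these lines (the wrapper function is hypothetical, not part of this diff):

#include <linux/kthread.h>
#include <scsi/scsi_host.h>

static void example_shutdown_eh(struct Scsi_Host *shost)
{
	if (shost->ehandler) {
		/* wakes scsi_error_handler() and waits for it to return */
		kthread_stop(shost->ehandler);
		shost->ehandler = NULL;
	}
}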