Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/acapps.h | 8
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 5
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 13
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 6
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 5
-rw-r--r--  drivers/acpi/acpica/actables.h | 9
-rw-r--r--  drivers/acpi/acpica/acutils.h | 22
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 7
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 11
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 5
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 5
-rw-r--r--  drivers/acpi/acpica/exdump.c | 4
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 10
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 13
-rw-r--r--  drivers/acpi/acpica/exregion.c | 17
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 24
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 16
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 12
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 8
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 227
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 35
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 67
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 19
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 7
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 34
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 8
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 13
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 1
-rw-r--r--  drivers/acpi/acpica/utprint.c | 13
-rw-r--r--  drivers/acpi/acpica/utstate.c | 34
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/bcma/driver_mips.c | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 7
-rw-r--r--  drivers/block/drbd/drbd_req.c | 3
-rw-r--r--  drivers/block/loop.c | 294
-rw-r--r--  drivers/block/nbd.c | 140
-rw-r--r--  drivers/block/nvme-core.c | 159
-rw-r--r--  drivers/block/nvme-scsi.c | 28
-rw-r--r--  drivers/block/virtio_blk.c | 9
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 62
-rw-r--r--  drivers/block/xen-blkback/common.h | 6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 38
-rw-r--r--  drivers/bus/Kconfig | 13
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/mips_cdmm.c | 716
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 28
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 1
-rw-r--r--  drivers/clk/pistachio/Makefile | 3
-rw-r--r--  drivers/clk/pistachio/clk-pistachio.c | 329
-rw-r--r--  drivers/clk/pistachio/clk-pll.c | 401
-rw-r--r--  drivers/clk/pistachio/clk.c | 140
-rw-r--r--  drivers/clk/pistachio/clk.h | 174
-rw-r--r--  drivers/clocksource/Kconfig | 2
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 13
-rw-r--r--  drivers/cpuidle/governors/menu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 1
-rw-r--r--  drivers/hwmon/ina2xx.c | 17
-rw-r--r--  drivers/hwmon/lm85.c | 26
-rw-r--r--  drivers/hwmon/w83795.c | 8
-rw-r--r--  drivers/irqchip/Kconfig | 5
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c | 335
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c | 193
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 9
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 40
-rw-r--r--  drivers/mailbox/Kconfig | 9
-rw-r--r--  drivers/mailbox/Makefile | 2
-rw-r--r--  drivers/mailbox/arm_mhu.c | 195
-rw-r--r--  drivers/mailbox/pcc.c | 122
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_c.c | 2
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_core.c | 6
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_priv.h | 2
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t.c | 2
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t2.c | 2
-rw-r--r--  drivers/memstick/core/mspro_block.c | 3
-rw-r--r--  drivers/mmc/card/block.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/oprofile/buffer_sync.c | 30
-rw-r--r--  drivers/platform/Kconfig | 3
-rw-r--r--  drivers/platform/Makefile | 1
-rw-r--r--  drivers/platform/mips/Kconfig | 30
-rw-r--r--  drivers/platform/mips/Makefile | 2
-rw-r--r--  drivers/platform/mips/acpi_init.c | 150
-rw-r--r--  drivers/platform/mips/cpu_hwmon.c | 207
-rw-r--r--  drivers/rtc/Kconfig | 16
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/class.c | 6
-rw-r--r--  drivers/rtc/hctosys.c | 6
-rw-r--r--  drivers/rtc/interface.c | 5
-rw-r--r--  drivers/rtc/rtc-ab-b5ze-s3.c | 2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 4
-rw-r--r--  drivers/rtc/rtc-cmos.c | 6
-rw-r--r--  drivers/rtc/rtc-da9052.c | 97
-rw-r--r--  drivers/rtc/rtc-digicolor.c | 227
-rw-r--r--  drivers/rtc/rtc-ds1374.c | 8
-rw-r--r--  drivers/rtc/rtc-ds1685.c | 9
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 6
-rw-r--r--  drivers/rtc/rtc-efi-platform.c | 3
-rw-r--r--  drivers/rtc/rtc-em3027.c | 11
-rw-r--r--  drivers/rtc/rtc-hym8563.c | 12
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 6
-rw-r--r--  drivers/rtc/rtc-max77686.c | 6
-rw-r--r--  drivers/rtc/rtc-max8997.c | 8
-rw-r--r--  drivers/rtc/rtc-msm6242.c | 4
-rw-r--r--  drivers/rtc/rtc-omap.c | 68
-rw-r--r--  drivers/rtc/rtc-opal.c | 3
-rw-r--r--  drivers/rtc/rtc-pcf8563.c | 7
-rw-r--r--  drivers/rtc/rtc-s3c.c | 193
-rw-r--r--  drivers/rtc/rtc-s5m.c | 34
-rw-r--r--  drivers/rtc/rtc-stmp3xxx.c | 66
-rw-r--r--  drivers/rtc/rtc-twl.c | 9
-rw-r--r--  drivers/rtc/rtc-x1205.c | 4
-rw-r--r--  drivers/scsi/NCR5380.c | 10
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 410
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 106
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 10
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 106
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 96
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 13
-rw-r--r--  drivers/scsi/aacraid/linit.c | 61
-rw-r--r--  drivers/scsi/aacraid/rx.c | 14
-rw-r--r--  drivers/scsi/aacraid/src.c | 438
-rw-r--r--  drivers/scsi/aha1542.c | 1687
-rw-r--r--  drivers/scsi/aha1542.h | 136
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 10
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 3
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 2
-rw-r--r--  drivers/scsi/atari_scsi.c | 1
-rw-r--r--  drivers/scsi/bfa/bfad.c | 22
-rw-r--r--  drivers/scsi/g_NCR5380.c | 8
-rw-r--r--  drivers/scsi/ipr.c | 319
-rw-r--r--  drivers/scsi/ipr.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 25
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 738
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 74
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 208
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 264
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 83
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 72
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/mac_scsi.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/Kconfig | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 75
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 31
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 20
-rw-r--r--  drivers/scsi/scsi_scan.c | 7
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 1
-rw-r--r--  drivers/scsi/sd.c | 63
-rw-r--r--  drivers/scsi/storvsc_drv.c | 232
-rw-r--r--  drivers/scsi/sun3_scsi.c | 1
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 36
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 26
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 35
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 9
-rw-r--r--  drivers/ssb/Kconfig | 1
-rw-r--r--  drivers/ssb/driver_chipcommon_pmu.c | 2
-rw-r--r--  drivers/ssb/driver_mipscore.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw26.c | 22
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 5
-rw-r--r--  drivers/staging/octeon/ethernet.c | 10
-rw-r--r--  drivers/tty/Kconfig | 47
-rw-r--r--  drivers/tty/Makefile | 1
-rw-r--r--  drivers/tty/mips_ejtag_fdc.c | 1303
194 files changed, 9077 insertions, 3467 deletions
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index d863016565b5..e9f0833e818d 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -64,15 +64,15 @@
/* Macros for signons and file headers */
#define ACPI_COMMON_SIGNON(utility_name) \
- "\n%s\n%s version %8.8X%s [%s]\n%s\n\n", \
+ "\n%s\n%s version %8.8X%s\n%s\n\n", \
ACPICA_NAME, \
- utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \
+ utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, \
ACPICA_COPYRIGHT
#define ACPI_COMMON_HEADER(utility_name, prefix) \
- "%s%s\n%s%s version %8.8X%s [%s]\n%s%s\n%s\n", \
+ "%s%s\n%s%s version %8.8X%s\n%s%s\n%s\n", \
prefix, ACPICA_NAME, \
- prefix, utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \
+ prefix, utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, \
prefix, ACPICA_COPYRIGHT, \
prefix
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index a165d25343e8..a0c478784314 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -306,6 +306,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
@@ -321,9 +322,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 7add32e5d8c5..87b27521fcac 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x81
+#define AML_NUM_OPCODES 0x82
/* Forward declarations */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index cf607fe69dbd..c240bdf824f2 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -63,23 +63,12 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
- * printf() format helpers. These macros are workarounds for the difficulties
+ * printf() format helper. This macro is a workaround for the difficulties
* with emitting 64-bit integers and 64-bit pointers with the same code
* for both 32-bit and 64-bit hosts.
*/
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
-#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
-#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
-
-#else
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
-#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
-#define ACPI_PRINTF_UINT "0x%8.8X"
-#endif
-
/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index a5f17de45ac6..fd85ad05a24a 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -111,6 +111,7 @@
#define ARGP_DWORD_OP ARGP_LIST1 (ARGP_DWORDDATA)
#define ARGP_ELSE_OP ARGP_LIST2 (ARGP_PKGLENGTH, ARGP_TERMLIST)
#define ARGP_EVENT_OP ARGP_LIST1 (ARGP_NAME)
+#define ARGP_EXTERNAL_OP ARGP_LIST3 (ARGP_NAMESTRING, ARGP_BYTEDATA, ARGP_BYTEDATA)
#define ARGP_FATAL_OP ARGP_LIST3 (ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_TERMARG)
#define ARGP_FIELD_OP ARGP_LIST4 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_BYTEDATA, ARGP_FIELDLIST)
#define ARGP_FIND_SET_LEFT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
@@ -243,6 +244,7 @@
#define ARGI_DWORD_OP ARGI_INVALID_OPCODE
#define ARGI_ELSE_OP ARGI_INVALID_OPCODE
#define ARGI_EVENT_OP ARGI_INVALID_OPCODE
+#define ARGI_EXTERNAL_OP ARGI_LIST3 (ARGI_STRING, ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_FATAL_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index efc4c7124ccc..6357efb01b93 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -299,11 +299,13 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
union aml_resource *aml);
/*
- * rsdump
+ * rsdump - Debugger support
*/
+#ifdef ACPI_DEBUGGER
void acpi_rs_dump_resource_list(struct acpi_resource *resource);
-void acpi_rs_dump_irq_list(u8 * route_table);
+void acpi_rs_dump_irq_list(u8 *route_table);
+#endif
/*
* Resource conversion tables
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index d14b547b7cd5..87c7860b3394 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -68,11 +68,6 @@
#define ACPI_WALK_METHOD 0x01
#define ACPI_WALK_METHOD_RESTART 0x02
-/* Flags for iASL compiler only */
-
-#define ACPI_WALK_CONST_REQUIRED 0x10
-#define ACPI_WALK_CONST_OPTIONAL 0x20
-
struct acpi_walk_state {
struct acpi_walk_state *next; /* Next walk_state in list */
u8 descriptor_type; /* To differentiate various internal objs */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1c127a43017b..7e0b6f1bec9c 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -58,7 +58,9 @@ u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
/*
* tbdata - table data structure management
*/
-acpi_status acpi_tb_get_next_root_index(u32 *table_index);
+acpi_status
+acpi_tb_get_next_table_descriptor(u32 *table_index,
+ struct acpi_table_desc **table_desc);
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
@@ -119,11 +121,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
u8 flags,
u8 reload, u8 override, u32 *table_index);
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
- struct acpi_table_header *table,
- u32 length, u8 flags, u32 *table_index);
-
void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc);
void acpi_tb_terminate(void);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index c2f03e8774ad..2b3c5bd222f1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -502,6 +502,9 @@ const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name);
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
+
+#if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP)
const union acpi_predefined_info *acpi_ut_match_resource_name(char *name);
void
@@ -509,9 +512,8 @@ acpi_ut_display_predefined_method(char *buffer,
const union acpi_predefined_info *this_name,
u8 multi_line);
-void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
-
u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types);
+#endif
/*
* utstate - Generic state creation/cache routines
@@ -539,14 +541,6 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
u16 action,
union acpi_generic_state **state_list);
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
- void *external_object,
- u16 index,
- union acpi_generic_state **state_list);
-#endif /* ACPI_FUTURE_USAGE */
-
union acpi_generic_state *acpi_ut_create_control_state(void);
void acpi_ut_delete_generic_state(union acpi_generic_state *state);
@@ -570,7 +564,9 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
u8 acpi_ut_is_pci_root_bridge(char *id);
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+#endif
acpi_status
acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
@@ -629,15 +625,19 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
*/
void acpi_ut_strupr(char *src_string);
+#ifdef ACPI_ASL_COMPILER
void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
+#endif
acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
void acpi_ut_print_string(char *string, u16 max_length);
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
void ut_convert_backslashes(char *pathname);
+#endif
u8 acpi_ut_valid_acpi_name(char *name);
@@ -785,6 +785,8 @@ int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...);
/*
* utuuid -- UUID support functions
*/
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer);
+#endif
#endif /* _ACUTILS_H */
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 3a95068fc119..be9fd009cb28 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -65,6 +65,7 @@
#define AML_PACKAGE_OP (u16) 0x12
#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */
#define AML_METHOD_OP (u16) 0x14
+#define AML_EXTERNAL_OP (u16) 0x15 /* ACPI 6.0 */
#define AML_DUAL_NAME_PREFIX (u16) 0x2e
#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f
#define AML_NAME_CHAR_SUBSEQ (u16) 0x30
@@ -206,7 +207,6 @@
#define AML_INT_RESERVEDFIELD_OP (u16) 0x0031
#define AML_INT_ACCESSFIELD_OP (u16) 0x0032
#define AML_INT_BYTELIST_OP (u16) 0x0033
-#define AML_INT_STATICSTRING_OP (u16) 0x0034
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 77244182ff02..ea0cc4e08f80 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NOT_EXIST);
}
- obj_desc->region.address =
- (acpi_physical_address) ACPI_TO_INTEGER(table);
+ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
obj_desc->region.length = table->length;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index e5ff89bcb3f5..deeddd6d2f05 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -564,6 +564,17 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
acpi_operand_object,
acpi_gbl_root_node);
status = AE_OK;
+ } else if (parent_op->common.aml_opcode ==
+ AML_EXTERNAL_OP) {
+
+ /* TBD: May only be temporary */
+
+ obj_desc =
+ acpi_ut_create_string_object((acpi_size) name_length);
+
+ ACPI_STRNCPY(obj_desc->string.pointer,
+ name_string, name_length);
+ status = AE_OK;
} else {
/*
* We just plain didn't find it -- which is a
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 5ed064e8673c..ccf793247447 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
ACPI_SET_BIT(gpe_register_info->enable_for_run,
(u8)register_bit);
}
+ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
return_ACPI_STATUS(AE_OK);
}
@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/* Enable the requested GPE */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
if (ACPI_SUCCESS(status)) {
status =
acpi_hw_low_set_gpe(gpe_event_info,
- ACPI_GPE_DISABLE_SAVE);
+ ACPI_GPE_DISABLE);
}
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 9abace3401f9..2ba28a63fb68 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
&region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_NATIVE_UINT(address),
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(region_obj->region.
space_id)));
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index df06a23c4197..faad911d46b5 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -356,7 +356,8 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
if (in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_ENABLED;
+ local_event_status |=
+ (ACPI_EVENT_FLAG_ENABLED | ACPI_EVENT_FLAG_ENABLE_SET);
}
/* Fixed event currently active? */
@@ -369,7 +370,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
if (in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_SET;
+ local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
}
(*event_status) = local_event_status;
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 7c213b6b6472..1da52bef632e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
acpi_os_printf("\n");
} else {
acpi_os_printf(" base %8.8X%8.8X Length %X\n",
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
- address),
+ ACPI_FORMAT_UINT64(obj_desc->region.
+ address),
obj_desc->region.length);
}
break;
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 49479927e7f7..725a3746a2df 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset,
- field_datum_byte_offset, ACPI_CAST_PTR(void,
- (rgn_desc->
- region.
- address +
- region_offset))));
+ field_datum_byte_offset,
+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
+ region_offset)));
/* Invoke the appropriate address_space/op_region handler */
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index b813fed95e56..1c64a988cbee 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -114,7 +114,18 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
/* Might return while OS is shutting down, just continue */
ACPI_FREE(fatal);
- break;
+ goto cleanup;
+
+ case AML_EXTERNAL_OP:
+ /*
+ * If the interpreter sees this opcode, just ignore it. The External
+ * op is intended for use by disassemblers in order to properly
+ * disassemble control method invocations. The opcode or group of
+ * opcodes should be surrounded by an "if (0)" clause to ensure that
+ * AML interpreters never see the opcode.
+ */
+ status = AE_OK;
+ goto cleanup;
default:
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 0fe188e238ef..f6c2f5499935 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -165,8 +165,8 @@ acpi_ex_system_memory_space_handler(u32 function,
* one page, which is similar to the original code that used a 4k
* maximum window.
*/
- page_boundary_map_length =
- ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
+ page_boundary_map_length = (acpi_size)
+ (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
if (page_boundary_map_length == 0) {
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}
@@ -177,12 +177,13 @@ acpi_ex_system_memory_space_handler(u32 function,
/* Create a new mapping starting at the address given */
- mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
+ mem_info->mapped_logical_address =
+ acpi_os_map_memory(address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
"Could not map memory at 0x%8.8X%8.8X, size %u",
- ACPI_FORMAT_NATIVE_UINT(address),
- (u32) map_length));
+ ACPI_FORMAT_UINT64(address),
+ (u32)map_length));
mem_info->mapped_length = 0;
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -202,8 +203,7 @@ acpi_ex_system_memory_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@@ -318,8 +318,7 @@ acpi_ex_system_io_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 84bc550f4f1d..73cfa5947ff3 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
* RETURN: Status
*
* DESCRIPTION: Enable or disable a single GPE in the parent enable register.
+ * The enable_mask field of the involved GPE register must be
+ * updated by the caller if necessary.
*
******************************************************************************/
@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Set or clear just the bit that corresponds to this GPE */
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
- switch (action & ~ACPI_GPE_SAVE_MASK) {
+ switch (action) {
case ACPI_GPE_CONDITIONAL_ENABLE:
/* Only enable if the corresponding enable_mask bit is set */
@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Write the updated enable mask */
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
- gpe_register_info->enable_mask = (u8)enable_mask;
- }
return (status);
}
@@ -250,6 +249,17 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
}
+ /* GPE currently enabled (enable bit == 1)? */
+
+ status = acpi_hw_read(&in_byte, &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (register_bit & in_byte) {
+ local_event_status |= ACPI_EVENT_FLAG_ENABLE_SET;
+ }
+
/* GPE currently active (status bit == 1)? */
status = acpi_hw_read(&in_byte, &gpe_register_info->status_address);
@@ -258,7 +268,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
}
if (register_bit & in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_SET;
+ local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
}
/* Set return value */
@@ -286,10 +296,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
{
acpi_status status;
+ gpe_register_info->enable_mask = enable_mask;
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status)) {
- gpe_register_info->enable_mask = enable_mask;
- }
return (status);
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 2bd33fe56cb3..29033d71417b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
byte_width = ACPI_DIV_8(bit_width);
last_address = address + byte_width - 1;
- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
- last_address),
- byte_width));
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(last_address), byte_width));
/* Maximum 16-bit address in I/O space */
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: %p/0x%X",
- ACPI_CAST_PTR(void, address), byte_width));
+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
+ ACPI_FORMAT_UINT64(address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
- ACPI_CAST_PTR(void, address),
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
port_info->end));
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 80f097eb7381..d259393505fa 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
switch (type) {
case ACPI_TYPE_PROCESSOR:
- acpi_os_printf("ID %02X Len %02X Addr %p\n",
+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
obj_desc->processor.proc_id,
obj_desc->processor.length,
- ACPI_CAST_PTR(void,
- obj_desc->processor.
- address));
+ ACPI_FORMAT_UINT64(obj_desc->processor.
+ address));
break;
case ACPI_TYPE_DEVICE:
@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
space_id));
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
- ACPI_FORMAT_NATIVE_UINT
- (obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->
+ region.
+ address),
obj_desc->region.length);
} else {
acpi_os_printf
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1af4a405e351..ed90fddf2487 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -646,7 +646,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+
+/* ACPI 6.0 opcodes */
+
+ /* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */
+ AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R)
/*! [End] no source code translation !*/
};
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index e18e7c47f482..20e1a35169fc 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -210,7 +210,7 @@ const u8 acpi_gbl_short_op_index[256] = {
/* 8 9 A B C D E F */
/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, 0x81, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 1539394c8c52..c428bb33204e 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -1,6 +1,6 @@
/*******************************************************************************
*
- * Module Name: rsdump - Functions to display the resource structures.
+ * Module Name: rsdump - AML debugger support for resource structures.
*
******************************************************************************/
@@ -48,7 +48,10 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
+/*
+ * All functions in this module are used by the AML Debugger only
+ */
+#if defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
@@ -80,6 +83,116 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
/*******************************************************************************
*
+ * FUNCTION: acpi_rs_dump_resource_list
+ *
+ * PARAMETERS: resource_list - Pointer to a resource descriptor list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dispatches the structure to the correct dump routine.
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
+{
+ u32 count = 0;
+ u32 type;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
+ return;
+ }
+
+ /* Walk list and dump all resource descriptors (END_TAG terminates) */
+
+ do {
+ acpi_os_printf("\n[%02X] ", count);
+ count++;
+
+ /* Validate Type before dispatch */
+
+ type = resource_list->type;
+ if (type > ACPI_RESOURCE_TYPE_MAX) {
+ acpi_os_printf
+ ("Invalid descriptor type (%X) in resource list\n",
+ resource_list->type);
+ return;
+ }
+
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource_list->length) {
+ acpi_os_printf
+ ("Invalid zero length descriptor in resource list\n");
+ return;
+ }
+
+ /* Dump the resource descriptor */
+
+ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_serial_bus_dispatch
+ [resource_list->data.
+ common_serial_bus.type]);
+ } else {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch
+ [type]);
+ }
+
+ /* Point to the next resource structure */
+
+ resource_list = ACPI_NEXT_RESOURCE(resource_list);
+
+ /* Exit when END_TAG descriptor is reached */
+
+ } while (type != ACPI_RESOURCE_TYPE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_irq_list
+ *
+ * PARAMETERS: route_table - Pointer to the routing table to dump.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print IRQ routing table
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_irq_list(u8 *route_table)
+{
+ struct acpi_pci_routing_table *prt_element;
+ u8 count;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
+ return;
+ }
+
+ prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
+
+ /* Dump all table elements, Exit on zero length element */
+
+ for (count = 0; prt_element->length; count++) {
+ acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
+ count);
+ acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
+
+ prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
+ prt_element, prt_element->length);
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_dump_descriptor
*
* PARAMETERS: resource - Buffer containing the resource
@@ -357,116 +470,6 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
/*******************************************************************************
*
- * FUNCTION: acpi_rs_dump_resource_list
- *
- * PARAMETERS: resource_list - Pointer to a resource descriptor list
- *
- * RETURN: None
- *
- * DESCRIPTION: Dispatches the structure to the correct dump routine.
- *
- ******************************************************************************/
-
-void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
-{
- u32 count = 0;
- u32 type;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Check if debug output enabled */
-
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
- return;
- }
-
- /* Walk list and dump all resource descriptors (END_TAG terminates) */
-
- do {
- acpi_os_printf("\n[%02X] ", count);
- count++;
-
- /* Validate Type before dispatch */
-
- type = resource_list->type;
- if (type > ACPI_RESOURCE_TYPE_MAX) {
- acpi_os_printf
- ("Invalid descriptor type (%X) in resource list\n",
- resource_list->type);
- return;
- }
-
- /* Sanity check the length. It must not be zero, or we loop forever */
-
- if (!resource_list->length) {
- acpi_os_printf
- ("Invalid zero length descriptor in resource list\n");
- return;
- }
-
- /* Dump the resource descriptor */
-
- if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_serial_bus_dispatch
- [resource_list->data.
- common_serial_bus.type]);
- } else {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch
- [type]);
- }
-
- /* Point to the next resource structure */
-
- resource_list = ACPI_NEXT_RESOURCE(resource_list);
-
- /* Exit when END_TAG descriptor is reached */
-
- } while (type != ACPI_RESOURCE_TYPE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_rs_dump_irq_list
- *
- * PARAMETERS: route_table - Pointer to the routing table to dump.
- *
- * RETURN: None
- *
- * DESCRIPTION: Print IRQ routing table
- *
- ******************************************************************************/
-
-void acpi_rs_dump_irq_list(u8 * route_table)
-{
- struct acpi_pci_routing_table *prt_element;
- u8 count;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Check if debug output enabled */
-
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
- return;
- }
-
- prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
-
- /* Dump all table elements, Exit on zero length element */
-
- for (count = 0; prt_element->length; count++) {
- acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
- count);
- acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
-
- prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
- prt_element, prt_element->length);
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_rs_out*
*
* PARAMETERS: title - Name of the resource field
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 6a144957aadd..d7f8386455bd 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table =
- ACPI_CAST_PTR(struct acpi_table_header,
- table_desc->address);
+ table = ACPI_CAST_PTR(struct acpi_table_header,
+ ACPI_PHYSADDR_TO_PTR(table_desc->
+ address));
break;
default:
@@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table_header = ACPI_CAST_PTR(struct acpi_table_header, address);
+ table_header = ACPI_CAST_PTR(struct acpi_table_header,
+ ACPI_PHYSADDR_TO_PTR(address));
if (!table_header) {
return (AE_NO_MEMORY);
}
@@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
table_desc->length);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
- "%4.4s " ACPI_PRINTF_UINT
+ "%4.4s 0x%8.8X%8.8X"
" Attempted table install failed",
acpi_ut_valid_acpi_name(table_desc->
signature.
ascii) ?
table_desc->signature.ascii : "????",
- ACPI_FORMAT_TO_UINT(table_desc->
- address)));
+ ACPI_FORMAT_UINT64(table_desc->
+ address)));
goto invalidate_and_exit;
}
}
@@ -483,19 +484,23 @@ acpi_status acpi_tb_resize_root_table_list(void)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_get_next_root_index
+ * FUNCTION: acpi_tb_get_next_table_descriptor
*
* PARAMETERS: table_index - Where table index is returned
+ * table_desc - Where table descriptor is returned
*
- * RETURN: Status and table index.
+ * RETURN: Status and table index/descriptor.
*
* DESCRIPTION: Allocate a new ACPI table entry to the global table list
*
******************************************************************************/
-acpi_status acpi_tb_get_next_root_index(u32 *table_index)
+acpi_status
+acpi_tb_get_next_table_descriptor(u32 *table_index,
+ struct acpi_table_desc **table_desc)
{
acpi_status status;
+ u32 i;
/* Ensure that there is room for the table in the Root Table List */
@@ -507,8 +512,16 @@ acpi_status acpi_tb_get_next_root_index(u32 *table_index)
}
}
- *table_index = acpi_gbl_root_table_list.current_table_count;
+ i = acpi_gbl_root_table_list.current_table_count;
acpi_gbl_root_table_list.current_table_count++;
+
+ if (table_index) {
+ *table_index = i;
+ }
+ if (table_desc) {
+ *table_desc = &acpi_gbl_root_table_list.tables[i];
+ }
+
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 9bad45e63a45..008a251780f4 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
status = acpi_tb_acquire_temp_table(&new_table_desc, address,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
- ACPI_CAST_PTR(void, address)));
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire table length at %8.8X%8.8X",
+ ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
@@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
- ACPI_CAST_PTR(void, address)));
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire table length at %8.8X%8.8X",
+ ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
@@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
- ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p",
- new_table_desc.signature.ascii, ACPI_CAST_PTR(void,
- address)));
+ ACPI_INFO((AE_INFO,
+ "Ignoring installation of %4.4s at %8.8X%8.8X",
+ new_table_desc.signature.ascii,
+ ACPI_FORMAT_UINT64(address)));
goto release_and_exit;
}
@@ -346,7 +349,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
acpi_tb_uninstall_table(&new_table_desc);
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_OK);
}
}
@@ -354,7 +356,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Add the table to the global root table list */
- status = acpi_tb_get_next_root_index(&i);
+ status = acpi_tb_get_next_table_descriptor(&i, NULL);
if (ACPI_FAILURE(status)) {
goto release_and_exit;
}
@@ -429,11 +431,11 @@ finish_override:
return;
}
- ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
- " %s table override, new table: " ACPI_PRINTF_UINT,
+ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
+ " %s table override, new table: 0x%8.8X%8.8X",
old_table_desc->signature.ascii,
- ACPI_FORMAT_TO_UINT(old_table_desc->address),
- override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address)));
+ ACPI_FORMAT_UINT64(old_table_desc->address),
+ override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
/* We can now uninstall the original table */
@@ -455,43 +457,6 @@ finish_override:
/*******************************************************************************
*
- * FUNCTION: acpi_tb_store_table
- *
- * PARAMETERS: address - Table address
- * table - Table header
- * length - Table length
- * flags - Install flags
- * table_index - Where the table index is returned
- *
- * RETURN: Status and table index.
- *
- * DESCRIPTION: Add an ACPI table to the global table list
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
- struct acpi_table_header * table,
- u32 length, u8 flags, u32 *table_index)
-{
- acpi_status status;
- struct acpi_table_desc *table_desc;
-
- status = acpi_tb_get_next_root_index(table_index);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Initialize added table */
-
- table_desc = &acpi_gbl_root_table_list.tables[*table_index];
- acpi_tb_init_table_descriptor(table_desc, address, flags, table);
- table_desc->pointer = table;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_tb_uninstall_table
*
* PARAMETERS: table_desc - Table descriptor
@@ -517,7 +482,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
- ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address));
+ ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
}
table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index ef16c06e5091..77ba5c71c6e7 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- /*
- * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
- * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
- * The %p specifier does not emit uniform output on all hosts. On some,
- * leading zeros are not supported.
- */
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
- header->signature, ACPI_FORMAT_TO_UINT(address),
+ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
+ header->signature, ACPI_FORMAT_UINT64(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO,
- "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
- ACPI_FORMAT_TO_UINT(address),
+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
+ ACPI_FORMAT_UINT64(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%-4.4s " ACPI_PRINTF_UINT
+ "%-4.4s 0x%8.8X%8.8X"
" %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
- local_header.signature, ACPI_FORMAT_TO_UINT(address),
+ local_header.signature, ACPI_FORMAT_UINT64(address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
local_header.oem_revision,
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index eac52cf14f1a..fa76a3603aa1 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
*
******************************************************************************/
-acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
{
u8 *table_ptr;
u8 *mem_rover;
@@ -200,7 +200,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
physical_address +=
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
- *table_address = physical_address;
+ *table_address =
+ (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
}
@@ -233,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
(ACPI_HI_RSDP_WINDOW_BASE +
ACPI_PTR_DIFF(mem_rover, table_ptr));
- *table_address = physical_address;
+ *table_address = (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 1279f50da757..911ea8e7fe87 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
acpi_gbl_address_range_list[space_id] = range_info;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->region_node),
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, range_info->end_address)));
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(range_info->end_address)));
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
}
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->
region_node),
- ACPI_CAST_PTR(void,
- range_info->
- start_address),
- ACPI_CAST_PTR(void,
- range_info->
- end_address)));
+ ACPI_FORMAT_UINT64(range_info->
+ start_address),
+ ACPI_FORMAT_UINT64(range_info->
+ end_address)));
ACPI_FREE(range_info);
return_VOID;
@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
region_node);
ACPI_WARNING((AE_INFO,
- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
+ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
acpi_ut_get_region_name(space_id),
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, end_address),
- ACPI_CAST_PTR(void,
- range_info->
- start_address),
- ACPI_CAST_PTR(void,
- range_info->
- end_address),
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(end_address),
+ ACPI_FORMAT_UINT64(range_info->
+ start_address),
+ ACPI_FORMAT_UINT64(range_info->
+ end_address),
pathname));
ACPI_FREE(pathname);
}
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 242bd071f007..a8c39643e618 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -150,6 +150,14 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
return;
}
+ /*
+ * Add comment characters so rest of line is ignored when
+ * compiled
+ */
+ if (j == 0) {
+ acpi_os_printf("// ");
+ }
+
buf_char = buffer[(acpi_size) i + j];
if (ACPI_IS_PRINT(buf_char)) {
acpi_os_printf("%c", buf_char);
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 5e8df9177da4..a72685c1e819 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -102,12 +102,19 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_TZ_", ACPI_TYPE_DEVICE, NULL},
- {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
+ /*
+ * March, 2015:
+ * The _REV object is in the process of being deprecated, because
+ * other ACPI implementations permanently return 2. Thus, it
+ * has little or no value. Return 2 for compatibility with
+ * other ACPI implementations.
+ */
+ {"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
- {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
+ {"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
- {"_OSI", ACPI_TYPE_METHOD, (char *)1},
+ {"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
#endif
/* Table terminator */
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 56bbacd576f2..cbb7034d28d8 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -75,6 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
return (FALSE);
}
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_aml_table
@@ -102,6 +103,7 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
return (FALSE);
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 574cd3118313..44035abdbf29 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -100,6 +100,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
+ {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 82ca9142e10d..2be6bd4bdc09 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -357,11 +357,11 @@ int
acpi_ut_vsnprintf(char *string,
acpi_size size, const char *format, va_list args)
{
- u8 base = 10;
- u8 type = 0;
- s32 width = -1;
- s32 precision = -1;
- char qualifier = 0;
+ u8 base;
+ u8 type;
+ s32 width;
+ s32 precision;
+ char qualifier;
u64 number;
char *pos;
char *end;
@@ -380,6 +380,9 @@ acpi_ut_vsnprintf(char *string,
continue;
}
+ type = 0;
+ base = 10;
+
/* Process sign */
do {
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 8274cc16edc3..f201171c5dda 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -49,39 +49,6 @@ ACPI_MODULE_NAME("utstate")
/*******************************************************************************
*
- * FUNCTION: acpi_ut_create_pkg_state_and_push
- *
- * PARAMETERS: object - Object to be added to the new state
- * action - Increment/Decrement
- * state_list - List the state will be added to
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create a new state and push it
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
- void *external_object,
- u16 index,
- union acpi_generic_state **state_list)
-{
- union acpi_generic_state *state;
-
- ACPI_FUNCTION_ENTRY();
-
- state =
- acpi_ut_create_pkg_state(internal_object, external_object, index);
- if (!state) {
- return (AE_NO_MEMORY);
- }
-
- acpi_ut_push_generic_state(state_list, state);
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_push_generic_state
*
* PARAMETERS: list_head - Head of the state stack
@@ -92,7 +59,6 @@ acpi_ut_create_pkg_state_and_push(void *internal_object,
* DESCRIPTION: Push a state object onto a state stack
*
******************************************************************************/
-
void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index c6149a212149..e6cab669bd9c 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -47,6 +47,7 @@
#define _COMPONENT ACPI_COMPILER
ACPI_MODULE_NAME("utuuid")
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
/*
* UUID support functions.
*
@@ -94,3 +95,4 @@ void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer)
1]);
}
}
+#endif
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 04faf6df959f..24424f3fef96 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -21,7 +21,7 @@
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
enum bcma_boot_dev {
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1fc83427199c..81fde9ef7f8e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2107,13 +2107,12 @@ static int drbd_create_mempools(void)
if (drbd_md_io_page_pool == NULL)
goto Enomem;
- drbd_request_mempool = mempool_create(number,
- mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
+ drbd_request_mempool = mempool_create_slab_pool(number,
+ drbd_request_cache);
if (drbd_request_mempool == NULL)
goto Enomem;
- drbd_ee_mempool = mempool_create(number,
- mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
+ drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
if (drbd_ee_mempool == NULL)
goto Enomem;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 34f2f0ba409b..3907202fb9d9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -52,9 +52,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
{
struct drbd_request *req;
- req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
if (!req)
return NULL;
+ memset(req, 0, sizeof(*req));
drbd_req_make_private_bio(req, bio_src);
req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
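Two related points in the drbd hunks: mempool_create_slab_pool() is simply mempool_create() with the standard slab alloc/free callbacks plugged in, and mempool_alloc() may return an element from the pool's preallocated reserve, which is not re-zeroed, so __GFP_ZERO cannot be relied on and the request is now cleared with an explicit memset(). A sketch of the equivalence, using a hypothetical example_cache:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Sketch only: example_cache/example_pool are placeholders. */
static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_create_pool(void)
{
	example_pool = mempool_create(16, mempool_alloc_slab,
				      mempool_free_slab, example_cache);
	/* equivalently:
	 * example_pool = mempool_create_slab_pool(16, example_cache);
	 */
	return example_pool ? 0 : -ENOMEM;
}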
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c4fd1e45ce1e..ae3fcb4199e9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -88,28 +88,6 @@ static int part_shift;
static struct workqueue_struct *loop_wq;
-/*
- * Transfer functions
- */
-static int transfer_none(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block)
-{
- char *raw_buf = kmap_atomic(raw_page) + raw_off;
- char *loop_buf = kmap_atomic(loop_page) + loop_off;
-
- if (cmd == READ)
- memcpy(loop_buf, raw_buf, size);
- else
- memcpy(raw_buf, loop_buf, size);
-
- kunmap_atomic(loop_buf);
- kunmap_atomic(raw_buf);
- cond_resched();
- return 0;
-}
-
static int transfer_xor(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
@@ -148,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
static struct loop_func_table none_funcs = {
.number = LO_CRYPT_NONE,
- .transfer = transfer_none,
-};
+};
static struct loop_func_table xor_funcs = {
.number = LO_CRYPT_XOR,
.transfer = transfer_xor,
.init = xor_init
-};
+};
/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -215,207 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
struct page *lpage, unsigned loffs,
int size, sector_t rblock)
{
- if (unlikely(!lo->transfer))
+ int ret;
+
+ ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+ if (likely(!ret))
return 0;
- return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+ printk_ratelimited(KERN_ERR
+ "loop: Transfer error at byte offset %llu, length %i.\n",
+ (unsigned long long)rblock << 9, size);
+ return ret;
}
-/**
- * __do_lo_send_write - helper for writing data to a loop device
- *
- * This helper just factors out common code between do_lo_send_direct_write()
- * and do_lo_send_write().
- */
-static int __do_lo_send_write(struct file *file,
- u8 *buf, const int len, loff_t pos)
+static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
- struct kvec kvec = {.iov_base = buf, .iov_len = len};
- struct iov_iter from;
+ struct iov_iter i;
ssize_t bw;
- iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len);
+ iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
file_start_write(file);
- bw = vfs_iter_write(file, &from, &pos);
+ bw = vfs_iter_write(file, &i, ppos);
file_end_write(file);
- if (likely(bw == len))
+
+ if (likely(bw == bvec->bv_len))
return 0;
- printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)pos, len);
+
+ printk_ratelimited(KERN_ERR
+ "loop: Write error at byte offset %llu, length %i.\n",
+ (unsigned long long)*ppos, bvec->bv_len);
if (bw >= 0)
bw = -EIO;
return bw;
}
-/**
- * do_lo_send_direct_write - helper for writing data to a loop device
- *
- * This is the fast, non-transforming version that does not need double
- * buffering.
- */
-static int do_lo_send_direct_write(struct loop_device *lo,
- struct bio_vec *bvec, loff_t pos, struct page *page)
+static int lo_write_simple(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
- kmap(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len, pos);
- kunmap(bvec->bv_page);
- cond_resched();
- return bw;
+ struct bio_vec bvec;
+ struct req_iterator iter;
+ int ret = 0;
+
+ rq_for_each_segment(bvec, rq, iter) {
+ ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
+ if (ret < 0)
+ break;
+ cond_resched();
+ }
+
+ return ret;
}
-/**
- * do_lo_send_write - helper for writing data to a loop device
- *
+/*
* This is the slow, transforming version that needs to double buffer the
* data as it cannot do the transformations in place without having direct
* access to the destination pages of the backing file.
*/
-static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
- loff_t pos, struct page *page)
+static int lo_write_transfer(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len, pos >> 9);
- if (likely(!ret))
- return __do_lo_send_write(lo->lo_backing_file,
- page_address(page), bvec->bv_len,
- pos);
- printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
- "length %i.\n", (unsigned long long)pos, bvec->bv_len);
- if (ret > 0)
- ret = -EIO;
- return ret;
-}
-
-static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
-{
- int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
- struct page *page);
- struct bio_vec bvec;
+ struct bio_vec bvec, b;
struct req_iterator iter;
- struct page *page = NULL;
+ struct page *page;
int ret = 0;
- if (lo->transfer != transfer_none) {
- page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page))
- goto fail;
- kmap(page);
- do_lo_send = do_lo_send_write;
- } else {
- do_lo_send = do_lo_send_direct_write;
- }
+ page = alloc_page(GFP_NOIO);
+ if (unlikely(!page))
+ return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
- ret = do_lo_send(lo, &bvec, pos, page);
+ ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len, pos >> 9);
+ if (unlikely(ret))
+ break;
+
+ b.bv_page = page;
+ b.bv_offset = 0;
+ b.bv_len = bvec.bv_len;
+ ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
if (ret < 0)
break;
- pos += bvec.bv_len;
}
- if (page) {
- kunmap(page);
- __free_page(page);
- }
-out:
+
+ __free_page(page);
return ret;
-fail:
- printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
- ret = -ENOMEM;
- goto out;
}
-struct lo_read_data {
- struct loop_device *lo;
- struct page *page;
- unsigned offset;
- int bsize;
-};
+static int lo_read_simple(struct loop_device *lo, struct request *rq,
+ loff_t pos)
+{
+ struct bio_vec bvec;
+ struct req_iterator iter;
+ struct iov_iter i;
+ ssize_t len;
-static int
-lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
- struct splice_desc *sd)
-{
- struct lo_read_data *p = sd->u.data;
- struct loop_device *lo = p->lo;
- struct page *page = buf->page;
- sector_t IV;
- int size;
-
- IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
- (buf->offset >> 9);
- size = sd->len;
- if (size > p->bsize)
- size = p->bsize;
-
- if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
- printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
- page->index);
- size = -EINVAL;
- }
+ rq_for_each_segment(bvec, rq, iter) {
+ iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ if (len < 0)
+ return len;
- flush_dcache_page(p->page);
+ flush_dcache_page(bvec.bv_page);
- if (size > 0)
- p->offset += size;
+ if (len != bvec.bv_len) {
+ struct bio *bio;
- return size;
-}
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ break;
+ }
+ cond_resched();
+ }
-static int
-lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
-{
- return __splice_from_pipe(pipe, sd, lo_splice_actor);
+ return 0;
}
-static ssize_t
-do_lo_receive(struct loop_device *lo,
- struct bio_vec *bvec, int bsize, loff_t pos)
+static int lo_read_transfer(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- struct lo_read_data cookie;
- struct splice_desc sd;
- struct file *file;
- ssize_t retval;
+ struct bio_vec bvec, b;
+ struct req_iterator iter;
+ struct iov_iter i;
+ struct page *page;
+ ssize_t len;
+ int ret = 0;
- cookie.lo = lo;
- cookie.page = bvec->bv_page;
- cookie.offset = bvec->bv_offset;
- cookie.bsize = bsize;
+ page = alloc_page(GFP_NOIO);
+ if (unlikely(!page))
+ return -ENOMEM;
- sd.len = 0;
- sd.total_len = bvec->bv_len;
- sd.flags = 0;
- sd.pos = pos;
- sd.u.data = &cookie;
+ rq_for_each_segment(bvec, rq, iter) {
+ loff_t offset = pos;
- file = lo->lo_backing_file;
- retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
+ b.bv_page = page;
+ b.bv_offset = 0;
+ b.bv_len = bvec.bv_len;
- return retval;
-}
+ iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ if (len < 0) {
+ ret = len;
+ goto out_free_page;
+ }
-static int
-lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- ssize_t s;
+ ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
+ bvec.bv_offset, len, offset >> 9);
+ if (ret)
+ goto out_free_page;
- rq_for_each_segment(bvec, rq, iter) {
- s = do_lo_receive(lo, &bvec, bsize, pos);
- if (s < 0)
- return s;
+ flush_dcache_page(bvec.bv_page);
- if (s != bvec.bv_len) {
+ if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
- pos += bvec.bv_len;
}
- return 0;
+
+ ret = 0;
+out_free_page:
+ __free_page(page);
+ return ret;
}
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
@@ -464,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
ret = lo_req_flush(lo, rq);
else if (rq->cmd_flags & REQ_DISCARD)
ret = lo_discard(lo, rq, pos);
+ else if (lo->transfer)
+ ret = lo_write_transfer(lo, rq, pos);
else
- ret = lo_send(lo, rq, pos);
- } else
- ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
+ ret = lo_write_simple(lo, rq, pos);
+
+ } else {
+ if (lo->transfer)
+ ret = lo_read_transfer(lo, rq, pos);
+ else
+ ret = lo_read_simple(lo, rq, pos);
+ }
return ret;
}
@@ -788,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = transfer_none;
+ lo->transfer = NULL;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1007,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
info->lo_encrypt_key_size);
lo->lo_key_owner = uid;
- }
+ }
return 0;
}
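The loop.c rework above drops the splice-based read path and the kmap-based write helpers in favour of vfs_iter_read()/vfs_iter_write() over ITER_BVEC iterators, walking each request with rq_for_each_segment(); a NULL lo->transfer now means "no transformation", so transfer_none and its table entry disappear. A minimal sketch of the per-segment I/O pattern used by lo_read_simple() above (function name is a placeholder, not part of the driver):

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* Sketch: build an iov_iter over a single bio_vec and let the VFS helper
 * advance *pos on success. */
static ssize_t read_one_segment(struct file *file, struct bio_vec *bvec,
				loff_t *pos)
{
	struct iov_iter i;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
	return vfs_iter_read(file, &i, pos);
}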
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a98c41f72c63..39e5f7fae3ef 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -32,28 +32,36 @@
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
+#include <linux/types.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
-#define NBD_MAGIC 0x68797548
+struct nbd_device {
+ int flags;
+ int harderror; /* Code of hard error */
+ struct socket * sock; /* If == NULL, device is not ready, yet */
+ int magic;
+
+ spinlock_t queue_lock;
+ struct list_head queue_head; /* Requests waiting result */
+ struct request *active_req;
+ wait_queue_head_t active_wq;
+ struct list_head waiting_queue; /* Requests to be sent */
+ wait_queue_head_t waiting_wq;
+
+ struct mutex tx_lock;
+ struct gendisk *disk;
+ int blksize;
+ loff_t bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
+ int xmit_timeout;
+ int disconnect; /* a disconnect has been requested by user */
+};
-#ifdef NDEBUG
-#define dprintk(flags, fmt...)
-#else /* NDEBUG */
-#define dprintk(flags, fmt...) do { \
- if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
-} while (0)
-#define DBG_IOCTL 0x0004
-#define DBG_INIT 0x0010
-#define DBG_EXIT 0x0020
-#define DBG_BLKDEV 0x0100
-#define DBG_RX 0x0200
-#define DBG_TX 0x0400
-static unsigned int debugflags;
-#endif /* NDEBUG */
+#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
@@ -71,25 +79,9 @@ static int max_part;
*/
static DEFINE_SPINLOCK(nbd_lock);
-#ifndef NDEBUG
-static const char *ioctl_cmd_to_ascii(int cmd)
+static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
- switch (cmd) {
- case NBD_SET_SOCK: return "set-sock";
- case NBD_SET_BLKSIZE: return "set-blksize";
- case NBD_SET_SIZE: return "set-size";
- case NBD_SET_TIMEOUT: return "set-timeout";
- case NBD_SET_FLAGS: return "set-flags";
- case NBD_DO_IT: return "do-it";
- case NBD_CLEAR_SOCK: return "clear-sock";
- case NBD_CLEAR_QUE: return "clear-que";
- case NBD_PRINT_DEBUG: return "print-debug";
- case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
- case NBD_DISCONNECT: return "disconnect";
- case BLKROSET: return "set-read-only";
- case BLKFLSBUF: return "flush-buffer-cache";
- }
- return "unknown";
+ return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
@@ -103,30 +95,26 @@ static const char *nbdcmd_to_ascii(int cmd)
}
return "invalid";
}
-#endif /* NDEBUG */
-static void nbd_end_request(struct request *req)
+static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
int error = req->errors ? -EIO : 0;
struct request_queue *q = req->q;
unsigned long flags;
- dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
- req, error ? "failed" : "done");
+ dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+ error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
__blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
+/*
+ * Forcibly shutdown the socket causing all listeners to error
+ */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
- /* Forcibly shutdown the socket causing all listeners
- * to error
- *
- * FIXME: This code is duplicated from sys_shutdown, but
- * there should be a more generic interface rather than
- * calling socket ops directly here */
if (lock)
mutex_lock(&nbd->tx_lock);
if (nbd->sock) {
@@ -253,17 +241,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
}
memcpy(request.handle, &req, sizeof(req));
- dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
- nbd->disk->disk_name, req,
- nbdcmd_to_ascii(nbd_cmd(req)),
- (unsigned long long)blk_rq_pos(req) << 9,
- blk_rq_bytes(req));
+ dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
+ req, nbdcmd_to_ascii(nbd_cmd(req)),
+ (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- goto error_out;
+ return -EIO;
}
if (nbd_cmd(req) == NBD_CMD_WRITE) {
@@ -277,21 +263,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
flags = 0;
if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
- dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- nbd->disk->disk_name, req, bvec.bv_len);
+ dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+ req, bvec.bv_len);
result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- goto error_out;
+ return -EIO;
}
}
}
return 0;
-
-error_out:
- return -EIO;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
@@ -302,7 +285,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
if (unlikely(err))
- goto out;
+ return ERR_PTR(err);
spin_lock(&nbd->queue_lock);
list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
@@ -314,10 +297,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
}
spin_unlock(&nbd->queue_lock);
- err = -ENOENT;
-
-out:
- return ERR_PTR(err);
+ return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
@@ -371,8 +351,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return req;
}
- dprintk(DBG_RX, "%s: request %p: got reply\n",
- nbd->disk->disk_name, req);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
if (nbd_cmd(req) == NBD_CMD_READ) {
struct req_iterator iter;
struct bio_vec bvec;
@@ -385,8 +364,8 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
req->errors++;
return req;
}
- dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- nbd->disk->disk_name, req, bvec.bv_len);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+ req, bvec.bv_len);
}
}
return req;
@@ -426,7 +405,7 @@ static int nbd_do_it(struct nbd_device *nbd)
}
while ((req = nbd_read_stat(nbd)) != NULL)
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->pid = 0;
@@ -455,7 +434,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
queuelist);
list_del_init(&req->queuelist);
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
while (!list_empty(&nbd->waiting_queue)) {
@@ -463,7 +442,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
queuelist);
list_del_init(&req->queuelist);
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
}
@@ -507,7 +486,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
if (nbd_send_req(nbd, req) != 0) {
dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
} else {
spin_lock(&nbd->queue_lock);
list_add_tail(&req->queuelist, &nbd->queue_head);
@@ -522,7 +501,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
error_out:
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
static int nbd_thread(void *data)
@@ -570,18 +549,18 @@ static void do_nbd_request(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
- dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
- req->rq_disk->disk_name, req, req->cmd_type);
-
nbd = req->rq_disk->private_data;
BUG_ON(nbd->magic != NBD_MAGIC);
+ dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
+ req, req->cmd_type);
+
if (unlikely(!nbd->sock)) {
dev_err(disk_to_dev(nbd->disk),
"Attempted send on closed socket\n");
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
continue;
}
@@ -706,13 +685,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
else
blk_queue_flush(nbd->disk->queue, 0);
- thread = kthread_create(nbd_thread, nbd, "%s",
- nbd->disk->disk_name);
+ thread = kthread_run(nbd_thread, nbd, "%s",
+ nbd->disk->disk_name);
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
return PTR_ERR(thread);
}
- wake_up_process(thread);
+
error = nbd_do_it(nbd);
kthread_stop(thread);
@@ -768,10 +747,6 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
BUG_ON(nbd->magic != NBD_MAGIC);
- /* Anyone capable of this syscall can do *real bad* things */
- dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
- nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
-
mutex_lock(&nbd->tx_lock);
error = __nbd_ioctl(bdev, nbd, cmd, arg);
mutex_unlock(&nbd->tx_lock);
@@ -861,7 +836,6 @@ static int __init nbd_init(void)
}
printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
- dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
@@ -920,7 +894,3 @@ module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
-#ifndef NDEBUG
-module_param(debugflags, int, 0644);
-MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
-#endif
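Besides moving struct nbd_device into the .c file and replacing the dprintk()/debugflags machinery with dev_dbg() keyed off the gendisk's device (via nbd_to_dev()), the hunks above drop the separate wake_up_process() call because kthread_run() is kthread_create() followed by an immediate wake-up. A sketch of that equivalence, with placeholder thread_fn/data names:

#include <linux/err.h>
#include <linux/kthread.h>

/* Sketch only: kthread_run() == kthread_create() + wake_up_process(). */
static struct task_struct *start_worker(int (*thread_fn)(void *), void *data)
{
	struct task_struct *t;

	t = kthread_create(thread_fn, data, "example-worker");
	if (!IS_ERR(t))
		wake_up_process(t);
	/* equivalently: t = kthread_run(thread_fn, data, "example-worker"); */
	return t;
}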
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e23be20a3417..85b8036deaa3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -44,7 +44,7 @@
#define NVME_MINORS (1U << MINORBITS)
#define NVME_Q_DEPTH 1024
-#define NVME_AQ_DEPTH 64
+#define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT (admin_timeout * HZ)
@@ -152,6 +152,7 @@ struct nvme_cmd_info {
*/
#define NVME_INT_PAGES 2
#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size)
+#define NVME_INT_MASK 0x01
/*
* Will slightly overestimate the number of pages needed. This is OK
@@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod)
*/
static bool iod_should_kfree(struct nvme_iod *iod)
{
- return (iod->private & 0x01) == 0;
+ return (iod->private & NVME_INT_MASK) == 0;
}
/* Special values must be less than 0x1000 */
@@ -301,8 +302,6 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
- struct request *req = ctx;
-
u32 result = le32_to_cpup(&cqe->result);
u16 status = le16_to_cpup(&cqe->status) >> 1;
@@ -311,8 +310,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
if (status == NVME_SC_SUCCESS)
dev_warn(nvmeq->q_dmadev,
"async event result %08x\n", result);
-
- blk_mq_free_hctx_request(nvmeq->hctx, req);
}
static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -432,7 +429,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
{
unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
sizeof(struct nvme_dsm_range);
- unsigned long mask = 0;
struct nvme_iod *iod;
if (rq->nr_phys_segments <= NVME_INT_PAGES &&
@@ -440,9 +436,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
iod = cmd->iod;
- mask = 0x01;
iod_init(iod, size, rq->nr_phys_segments,
- (unsigned long) rq | 0x01);
+ (unsigned long) rq | NVME_INT_MASK);
return iod;
}
@@ -522,8 +517,6 @@ static void nvme_dif_remap(struct request *req,
return;
pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
- if (!pmap)
- return;
p = pmap;
virt = bip_get_seed(bip);
@@ -645,12 +638,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
- int offset = offset_in_page(dma_addr);
+ u32 page_size = dev->page_size;
+ int offset = dma_addr & (page_size - 1);
__le64 *prp_list;
__le64 **list = iod_list(iod);
dma_addr_t prp_dma;
int nprps, i;
- u32 page_size = dev->page_size;
length -= (page_size - offset);
if (length <= 0)
@@ -1028,18 +1021,19 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
struct nvme_cmd_info *cmd_info;
struct request *req;
- req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
+ req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true);
if (IS_ERR(req))
return PTR_ERR(req);
req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
- nvme_set_info(cmd_info, req, async_req_completion);
+ nvme_set_info(cmd_info, NULL, async_req_completion);
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = req->tag;
+ blk_mq_free_hctx_request(nvmeq->hctx, req);
return __nvme_submit_cmd(nvmeq, &c);
}
@@ -1347,6 +1341,9 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
+ if (!nvmeq->qid && nvmeq->dev->admin_q)
+ blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -1378,8 +1375,6 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
- if (!qid && dev->admin_q)
- blk_mq_freeze_queue_start(dev->admin_q);
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
@@ -1583,6 +1578,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1;
dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
+ dev->admin_tagset.reserved_tags = 1;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1749,25 +1745,31 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_dev *dev = ns->dev;
struct nvme_user_io io;
struct nvme_command c;
- unsigned length, meta_len;
- int status, i;
- struct nvme_iod *iod, *meta_iod = NULL;
- dma_addr_t meta_dma_addr;
- void *meta, *uninitialized_var(meta_mem);
+ unsigned length, meta_len, prp_len;
+ int status, write;
+ struct nvme_iod *iod;
+ dma_addr_t meta_dma = 0;
+ void *meta = NULL;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
length = (io.nblocks + 1) << ns->lba_shift;
meta_len = (io.nblocks + 1) * ns->ms;
- if (meta_len && ((io.metadata & 3) || !io.metadata))
+ if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
return -EINVAL;
+ else if (meta_len && ns->ext) {
+ length += meta_len;
+ meta_len = 0;
+ }
+
+ write = io.opcode & 1;
switch (io.opcode) {
case nvme_cmd_write:
case nvme_cmd_read:
case nvme_cmd_compare:
- iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ iod = nvme_map_user_pages(dev, write, io.addr, length);
break;
default:
return -EINVAL;
@@ -1776,6 +1778,27 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (IS_ERR(iod))
return PTR_ERR(iod);
+ prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ if (length != prp_len) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+ if (meta_len) {
+ meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ &meta_dma, GFP_KERNEL);
+ if (!meta) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+ if (write) {
+ if (copy_from_user(meta, (void __user *)io.metadata,
+ meta_len)) {
+ status = -EFAULT;
+ goto unmap;
+ }
+ }
+ }
+
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
@@ -1787,75 +1810,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.reftag = cpu_to_le32(io.reftag);
c.rw.apptag = cpu_to_le16(io.apptag);
c.rw.appmask = cpu_to_le16(io.appmask);
-
- if (meta_len) {
- meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
- meta_len);
- if (IS_ERR(meta_iod)) {
- status = PTR_ERR(meta_iod);
- meta_iod = NULL;
- goto unmap;
- }
-
- meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
- &meta_dma_addr, GFP_KERNEL);
- if (!meta_mem) {
- status = -ENOMEM;
- goto unmap;
- }
-
- if (io.opcode & 1) {
- int meta_offset = 0;
-
- for (i = 0; i < meta_iod->nents; i++) {
- meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
- meta_iod->sg[i].offset;
- memcpy(meta_mem + meta_offset, meta,
- meta_iod->sg[i].length);
- kunmap_atomic(meta);
- meta_offset += meta_iod->sg[i].length;
- }
- }
-
- c.rw.metadata = cpu_to_le64(meta_dma_addr);
- }
-
- length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
c.rw.prp2 = cpu_to_le64(iod->first_dma);
-
- if (length != (io.nblocks + 1) << ns->lba_shift)
- status = -ENOMEM;
- else
- status = nvme_submit_io_cmd(dev, ns, &c, NULL);
-
- if (meta_len) {
- if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
- int meta_offset = 0;
-
- for (i = 0; i < meta_iod->nents; i++) {
- meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
- meta_iod->sg[i].offset;
- memcpy(meta, meta_mem + meta_offset,
- meta_iod->sg[i].length);
- kunmap_atomic(meta);
- meta_offset += meta_iod->sg[i].length;
- }
- }
-
- dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
- meta_dma_addr);
- }
-
+ c.rw.metadata = cpu_to_le64(meta_dma);
+ status = nvme_submit_io_cmd(dev, ns, &c, NULL);
unmap:
- nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_unmap_user_pages(dev, write, iod);
nvme_free_iod(dev, iod);
-
- if (meta_iod) {
- nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
- nvme_free_iod(dev, meta_iod);
+ if (meta) {
+ if (status == NVME_SC_SUCCESS && !write) {
+ if (copy_to_user((void __user *)io.metadata, meta,
+ meta_len))
+ status = -EFAULT;
+ }
+ dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
}
-
return status;
}
@@ -2018,7 +1987,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id;
dma_addr_t dma_addr;
- int lbaf, pi_type, old_ms;
+ u8 lbaf, pi_type;
+ u16 old_ms;
unsigned short bs;
id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
@@ -2039,6 +2009,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
ns->lba_shift = id->lbaf[lbaf].ds;
ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+ ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
/*
* If identify namespace failed, use default 512 byte block size so
@@ -2055,14 +2026,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
ns->ms != old_ms ||
bs != queue_logical_block_size(disk->queue) ||
- (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+ (ns->ms && ns->ext)))
blk_integrity_unregister(disk);
ns->pi_type = pi_type;
blk_queue_logical_block_size(ns->queue, bs);
if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
- !(id->flbas & NVME_NS_FLBAS_META_EXT))
+ !ns->ext)
nvme_init_integrity(ns);
if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
@@ -2334,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc;
- dev->event_limit = min(ctrl->aerl + 1, 8);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2881,6 +2851,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
nvme_set_irq_hints(dev);
+ dev->event_limit = 1;
return result;
free_tags:
@@ -3166,8 +3137,10 @@ static int __init nvme_init(void)
nvme_char_major = result;
nvme_class = class_create(THIS_MODULE, "nvme");
- if (!nvme_class)
+ if (IS_ERR(nvme_class)) {
+ result = PTR_ERR(nvme_class);
goto unregister_chrdev;
+ }
result = pci_register_driver(&nvme_driver);
if (result)
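On the last nvme-core hunk: class_create() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the old `if (!nvme_class)` test could never fire; the IS_ERR()/PTR_ERR() pair is the required pattern. A small sketch with a placeholder class name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

/* Sketch: "example" is a hypothetical class name. */
static struct class *example_class;

static int example_register_class(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);	/* failure is never NULL */
	return 0;
}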
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index e10196e0182d..6b736b00f63e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -55,6 +55,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define VPD_SERIAL_NUMBER 0x80
#define VPD_DEVICE_IDENTIFIERS 0x83
#define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_LIMITS 0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
/* CDB offsets */
@@ -132,9 +133,10 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_LIMITS_PAGE 0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
#define INQ_SERIAL_NUMBER_LENGTH 0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define INQ_NUM_SUPPORTED_VPD_PAGES 6
#define VERSION_SPC_4 0x06
#define ACA_UNSUPPORTED 0
#define STANDARD_INQUIRY_LENGTH 36
@@ -747,6 +749,7 @@ static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+ inq_response[9] = INQ_BDEV_LIMITS_PAGE;
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -938,6 +941,25 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
return res;
}
+static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *inq_response, int alloc_len)
+{
+ __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+ __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
+ __be32 discard_desc_count = cpu_to_be32(0x100);
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = VPD_BLOCK_LIMITS;
+ inq_response[3] = 0x3c; /* Page Length */
+ memcpy(&inq_response[8], &max_sectors, sizeof(u32));
+ memcpy(&inq_response[20], &max_discard, sizeof(u32));
+
+ if (max_discard)
+ memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
+
+ return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
+}
+
static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int alloc_len)
{
@@ -2268,6 +2290,10 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case VPD_EXTENDED_INQUIRY:
res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
break;
+ case VPD_BLOCK_LIMITS:
+ res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
case VPD_BLOCK_DEV_CHARACTERISTICS:
res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
break;
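The new 0xB0 (Block Limits) VPD page above advertises the maximum transfer length and the maximum unmap LBA count as big-endian 32-bit fields at offsets 8 and 20. A hedged sketch of how a reader would pull those fields back out of such a buffer (helper name is a placeholder):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Sketch: decode the two limits written at offsets 8 and 20 above. */
static void decode_block_limits(const u8 *vpd_b0,
				u32 *max_xfer_len, u32 *max_unmap_lba)
{
	*max_xfer_len  = get_unaligned_be32(&vpd_b0[8]);
	*max_unmap_lba = get_unaligned_be32(&vpd_b0[20]);
}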
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 655e570b9b31..5ea2f0bbbc7c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -342,7 +342,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
- u64 capacity, size;
+ u64 capacity;
/* Host must always specify the capacity. */
virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
@@ -354,9 +354,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- size = capacity * queue_logical_block_size(q);
- string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+ string_get_size(capacity, queue_logical_block_size(q),
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(capacity, queue_logical_block_size(q),
+ STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
"new size: %llu %d-byte logical blocks (%s/%s)\n",
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2a04d341e598..bd2b3bbbb22c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -34,6 +34,8 @@
* IN THE SOFTWARE.
*/
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
@@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
else if (persistent_gnt->gnt > this->gnt)
new = &((*new)->rb_right);
else {
- pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+ pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
return -EINVAL;
}
}
@@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
node = node->rb_right;
else {
if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
- pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+ pr_alert_ratelimited("requesting a grant already in use\n");
return NULL;
}
set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
@@ -257,7 +259,7 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
struct persistent_gnt *persistent_gnt)
{
if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
- pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+ pr_alert_ratelimited("freeing a grant already unused\n");
set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
atomic_dec(&blkif->persistent_gnt_in_use);
@@ -374,7 +376,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
}
if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+ pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
return;
}
@@ -396,7 +398,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
total = num_clean;
- pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+ pr_debug("Going to purge %u persistent grants\n", num_clean);
BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
@@ -428,13 +430,13 @@ purge_list:
* with the requested num
*/
if (!scan_used && !clean_used) {
- pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+ pr_debug("Still missing %u purged frames\n", num_clean);
scan_used = true;
goto purge_list;
}
finished:
if (!clean_used) {
- pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+ pr_debug("Finished scanning for grants to clean, removing used flag\n");
clean_used = true;
goto purge_list;
}
@@ -444,7 +446,7 @@ finished:
/* We can defer this work */
schedule_work(&blkif->persistent_purge_work);
- pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+ pr_debug("Purged %u/%u\n", (total - num_clean), total);
return;
}
@@ -520,20 +522,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif)
struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
unsigned long long new_size = vbd_sz(vbd);
- pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
+ pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
- pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
+ pr_info("VBD Resize: new size %llu\n", new_size);
vbd->size = new_size;
again:
err = xenbus_transaction_start(&xbt);
if (err) {
- pr_warn(DRV_PFX "Error starting transaction");
+ pr_warn("Error starting transaction\n");
return;
}
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(vbd));
if (err) {
- pr_warn(DRV_PFX "Error writing new size");
+ pr_warn("Error writing new size\n");
goto abort;
}
/*
@@ -543,7 +545,7 @@ again:
*/
err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
if (err) {
- pr_warn(DRV_PFX "Error writing the state");
+ pr_warn("Error writing the state\n");
goto abort;
}
@@ -551,7 +553,7 @@ again:
if (err == -EAGAIN)
goto again;
if (err)
- pr_warn(DRV_PFX "Error ending transaction");
+ pr_warn("Error ending transaction\n");
return;
abort:
xenbus_transaction_end(xbt, 1);
@@ -578,7 +580,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
- pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
+ pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu | pg: %4u/%4d\n",
current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req,
@@ -855,7 +857,7 @@ again:
/* This is a newly mapped grant */
BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) {
- pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+ pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(blkif, &pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1;
@@ -891,14 +893,14 @@ again:
goto next;
}
pages[seg_idx]->persistent_gnt = persistent_gnt;
- pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, blkif->persistent_gnt_c,
xen_blkif_max_pgrants);
goto next;
}
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
blkif->vbd.overflow_max_grants = 1;
- pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
blkif->domid, blkif->vbd.handle);
}
/*
@@ -916,7 +918,7 @@ next:
return ret;
out_of_memory:
- pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+ pr_alert("%s: out of memory\n", __func__);
put_free_pages(blkif, pages_to_gnt, segs_to_map);
return -ENOMEM;
}
@@ -996,7 +998,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
err = xen_vbd_translate(&preq, blkif, WRITE);
if (err) {
- pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+ pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
goto fail_response;
@@ -1012,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
GFP_KERNEL, secure);
fail_response:
if (err == -EOPNOTSUPP) {
- pr_debug(DRV_PFX "discard op failed, not supported\n");
+ pr_debug("discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
} else if (err)
status = BLKIF_RSP_ERROR;
@@ -1056,16 +1058,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
/* An error fails the entire request. */
if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
(error == -EOPNOTSUPP)) {
- pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
+ pr_debug("flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
(error == -EOPNOTSUPP)) {
- pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+ pr_debug("write barrier op failed, not supported\n");
xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
- pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
+ pr_debug("Buffer not up-to-date at end of operation,"
" error=%d\n", error);
pending_req->status = BLKIF_RSP_ERROR;
}
@@ -1110,7 +1112,7 @@ __do_block_io_op(struct xen_blkif *blkif)
if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rc = blk_rings->common.rsp_prod_pvt;
- pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+ pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
rp, rc, rp - rc, blkif->vbd.pdevice);
return -EACCES;
}
@@ -1217,8 +1219,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
if ((req->operation == BLKIF_OP_INDIRECT) &&
(req_operation != BLKIF_OP_READ) &&
(req_operation != BLKIF_OP_WRITE)) {
- pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
- req_operation);
+ pr_debug("Invalid indirect operation (%u)\n", req_operation);
goto fail_response;
}
@@ -1252,8 +1253,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
(nseg > MAX_INDIRECT_SEGMENTS))) {
- pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
- nseg);
+ pr_debug("Bad number of segments in request (%d)\n", nseg);
/* Haven't submitted any bio's yet. */
goto fail_response;
}
@@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
if (xen_vbd_translate(&preq, blkif, operation) != 0) {
- pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
+ pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
@@ -1303,7 +1303,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
if (((int)preq.sector_number|(int)seg[i].nsec) &
((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
- pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
+ pr_debug("Misaligned I/O request from domain %d\n",
blkif->domid);
goto fail_response;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 375d28851860..f620b5d3f77c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -44,12 +44,6 @@
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
-#define DRV_PFX "xen-blkback:"
-#define DPRINTK(fmt, args...) \
- pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
- __func__, __LINE__, ##args)
-
-
/*
* This is the maximum number of segments that would be allowed in indirect
* requests. This value will also be passed to the frontend.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ff3025922c14..6ab69ad61ee1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -14,6 +14,8 @@
*/
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
@@ -21,6 +23,9 @@
#include <xen/grant_table.h>
#include "common.h"
+/* Enlarge the array size in order to fully show blkback name. */
+#define BLKBACK_NAME_LEN (20)
+
struct backend_info {
struct xenbus_device *dev;
struct xen_blkif *blkif;
@@ -70,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
else
devname = devpath;
- snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
+ snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
@@ -79,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
- char name[TASK_COMM_LEN];
+ char name[BLKBACK_NAME_LEN];
/* Not ready to connect? */
if (!blkif->irq || !blkif->vbd.bdev)
@@ -424,14 +429,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
FMODE_READ : FMODE_WRITE, NULL);
if (IS_ERR(bdev)) {
- DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
+ pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
vbd->bdev = bdev;
if (vbd->bdev->bd_disk == NULL) {
- DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
+ pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
@@ -450,7 +455,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && blk_queue_secdiscard(q))
vbd->discard_secure = true;
- DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
+ pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
}
@@ -458,7 +463,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
- DPRINTK("");
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
if (be->major || be->minor)
xenvbd_sysfs_delif(dev);
@@ -564,6 +569,10 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
int err;
struct backend_info *be = kzalloc(sizeof(struct backend_info),
GFP_KERNEL);
+
+ /* match the pr_debug in xen_blkbk_remove */
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
+
if (!be) {
xenbus_dev_fatal(dev, -ENOMEM,
"allocating backend structure");
@@ -595,7 +604,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
return 0;
fail:
- DPRINTK("failed");
+ pr_warn("%s failed\n", __func__);
xen_blkbk_remove(dev);
return err;
}
@@ -619,7 +628,7 @@ static void backend_changed(struct xenbus_watch *watch,
unsigned long handle;
char *device_type;
- DPRINTK("");
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
&major, &minor);
@@ -638,7 +647,7 @@ static void backend_changed(struct xenbus_watch *watch,
if (be->major | be->minor) {
if (be->major != major || be->minor != minor)
- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+ pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
be->major, be->minor, major, minor);
return;
}
@@ -699,13 +708,12 @@ static void frontend_changed(struct xenbus_device *dev,
struct backend_info *be = dev_get_drvdata(&dev->dev);
int err;
- DPRINTK("%s", xenbus_strstate(frontend_state));
+ pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
- pr_info(DRV_PFX "%s: prepare for reconnect\n",
- dev->nodename);
+ pr_info("%s: prepare for reconnect\n", dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
@@ -772,7 +780,7 @@ static void connect(struct backend_info *be)
int err;
struct xenbus_device *dev = be->dev;
- DPRINTK("%s", dev->otherend);
+ pr_debug("%s %s\n", __func__, dev->otherend);
/* Supply the information about the device the frontend needs */
again:
@@ -858,7 +866,7 @@ static int connect_ring(struct backend_info *be)
char protocol[64] = "";
int err;
- DPRINTK("%s", dev->otherend);
+ pr_debug("%s %s\n", __func__, dev->otherend);
err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
&ring_ref, "event-channel", "%u", &evtchn, NULL);
@@ -893,7 +901,7 @@ static int connect_ring(struct backend_info *be)
be->blkif->vbd.feature_gnt_persistent = pers_grants;
be->blkif->vbd.overflow_max_grants = 0;
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+ pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
ring_ref, evtchn, be->blkif->blk_protocol, protocol,
pers_grants ? "persistent grants" : "");
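Across blkback.c, common.h and xenbus.c the hand-rolled DRV_PFX/DPRINTK macros are replaced by a pr_fmt() definition, so every pr_debug()/pr_info()/pr_warn() call picks up the "xen-blkback: " prefix automatically. A sketch of the mechanism:

/* pr_fmt() must be defined before the printk helpers are pulled in; every
 * pr_*() call then expands with the prefix prepended. */
#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/printk.h>

static void example(void)
{
	pr_info("ring-ref %ld\n", 42L);
	/* logs: "xen-blkback: ring-ref 42" */
}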
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index b99729e36860..cbddbaddb347 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,6 +20,19 @@ config IMX_WEIM
The WEIM(Wireless External Interface Module) works like a bus.
You can attach many different devices on it, such as NOR, onenand.
+config MIPS_CDMM
+ bool "MIPS Common Device Memory Map (CDMM) Driver"
+ depends on CPU_MIPSR2
+ help
+ Driver needed for the MIPS Common Device Memory Map bus in MIPS
+ cores. This bus is for per-CPU tightly coupled devices such as the
+ Fast Debug Channel (FDC).
+
+ For this to work, either your bootloader needs to enable the CDMM
+ region at an unused physical address on the boot CPU, or else your
+ platform code needs to implement mips_cdmm_phys_base() (see
+ asm/cdmm.h).
+
config MVEBU_MBUS
bool
depends on PLAT_ORION
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 2973c18cbcc2..807dd17ef2f8 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
new file mode 100644
index 000000000000..5bd792c68f9b
--- /dev/null
+++ b/drivers/bus/mips_cdmm.c
@@ -0,0 +1,716 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT 24
+#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT 16
+#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT 12
+#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW (1ul << 3)
+#define CDMM_ACSR_UR (1ul << 2)
+#define CDMM_ACSR_SW (1ul << 1)
+#define CDMM_ACSR_SR (1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE 64
+
+#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+ struct mips_cdmm_device *dev)
+{
+ int ret = 0;
+
+ for (; table->type; ++table) {
+ ret = (dev->type == table->type);
+ if (ret)
+ break;
+ }
+
+ return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+ return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ int retval = 0;
+
+ retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+ return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
+ return sprintf(buf, fmt, arg); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start,
+ (unsigned long long)dev->res.end,
+ dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+ &dev_attr_cpu.attr,
+ &dev_attr_type.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_resource.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
+
+struct bus_type mips_cdmm_bustype = {
+ .name = "cdmm",
+ .dev_groups = mips_cdmm_dev_groups,
+ .match = mips_cdmm_match,
+ .uevent = mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn: CDMM driver callback function to call for the device.
+ * @dev: CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+ void *fn;
+ struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ fn(work->dev);
+ return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret: Return type (void or int).
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name) \
+static _ret mips_cdmm_##_name(struct device *dev) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+ struct mips_cdmm_work_dev work = { \
+ .fn = cdrv->_name, \
+ .dev = cdev, \
+ }; \
+ \
+ _BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
+ mips_cdmm_##_ret##_work, &work); \
+}
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
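+ *
+ * A minimal registration might look like the following sketch; the driver
+ * structure and callback names here are illustrative only:
+ *
+ *	static struct mips_cdmm_driver foo_driver = {
+ *		.drv	= { .name = "foo_cdmm" },
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *	};
+ *
+ *	err = mips_cdmm_driver_register(&foo_driver);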
+ *
+ * Returns: 0 on success, or an error code from driver_register() on failure.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+ drv->drv.bus = &mips_cdmm_bustype;
+
+ if (drv->probe)
+ drv->drv.probe = mips_cdmm_probe;
+ if (drv->remove)
+ drv->drv.remove = mips_cdmm_remove;
+ if (drv->shutdown)
+ drv->drv.shutdown = mips_cdmm_shutdown;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys: Physical address at which it is mapped.
+ * @regs: Virtual address where registers can be accessed.
+ * @drbs: Total number of DRBs.
+ * @drbs_reserved: Number of DRBs reserved.
+ * @discovered: Whether the devices on the bus have been discovered yet.
+ * @offline: Whether the CDMM bus is going offline (or very early
+ * coming back online), in which case it should be
+ * reconfigured each time.
+ */
+struct mips_cdmm_bus {
+ phys_addr_t phys;
+ void __iomem *regs;
+ unsigned int drbs;
+ unsigned int drbs_reserved;
+ bool discovered;
+ bool offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: Pointer to CDMM bus information for the current CPU.
+ * May return ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+ struct mips_cdmm_bus *bus, **bus_p;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (!cpu_has_cdmm)
+ return ERR_PTR(-ENODEV);
+
+ cpu = smp_processor_id();
+ /* Avoid early use of per-cpu primitives before initialised */
+ if (cpu == 0)
+ return &mips_cdmm_boot_bus;
+
+ /* Get bus pointer */
+ bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+ local_irq_save(flags);
+ bus = *bus_p;
+ /* Attempt allocation if NULL */
+ if (unlikely(!bus)) {
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+ if (unlikely(!bus))
+ bus = ERR_PTR(-ENOMEM);
+ else
+ *bus_p = bus;
+ }
+ local_irq_restore(flags);
+ return bus;
+}
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns: Physical base address of CDMM region according to cdmmbase CP0
+ * register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+ unsigned long cdmmbase = read_c0_cdmmbase();
+
+ if (!(cdmmbase & MIPS_CDMMBASE_EN))
+ return 0;
+
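+ /* Extract the base address field and scale it up to a physical address */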
+ return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+ << MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus: Pointer to bus information for current CPU.
+ * IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+ unsigned long cdmmbase, flags;
+ int ret = 0;
+
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ local_irq_save(flags);
+ /* Don't set up bus a second time unless marked offline */
+ if (bus->offline) {
+ /* If CDMM region is still set up, nothing to do */
+ if (bus->phys == mips_cdmm_cur_base())
+ goto out;
+ /*
+ * The CDMM region isn't set up as expected, so it needs
+ * reconfiguring, but then we can stop checking it.
+ */
+ bus->offline = false;
+ } else if (bus->phys > 1) {
+ goto out;
+ }
+
+ /* If the CDMM region is already configured, inherit that setup */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_cur_base();
+ /* Otherwise, ask platform code for suggestions */
+ if (!bus->phys && mips_cdmm_phys_base)
+ bus->phys = mips_cdmm_phys_base();
+ /* Otherwise, copy what other CPUs have done */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_default_base;
+ /* Otherwise, complain once */
+ if (!bus->phys) {
+ bus->phys = 1;
+ /*
+ * If you hit this, either your bootloader needs to set up the
+ * CDMM on the boot CPU, or else you need to implement
+ * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+ */
+ pr_err("cdmm%u: Failed to choose a physical base\n",
+ smp_processor_id());
+ }
+ /* Already complained? */
+ if (bus->phys == 1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Record our success for other CPUs to copy */
+ mips_cdmm_default_base = bus->phys;
+
+ pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+ smp_processor_id(), &bus->phys);
+
+ /* Enable CDMM */
+ cdmmbase = read_c0_cdmmbase();
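+ /* Keep the fields below the base address; insert the new base and enable */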
+ cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+ cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+ << MIPS_CDMMBASE_ADDR_SHIFT;
+ cdmmbase |= MIPS_CDMMBASE_EN;
+ write_c0_cdmmbase(cdmmbase);
+ tlbw_use_hazard();
+
+ bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+ bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+ MIPS_CDMMBASE_SIZE_SHIFT);
+ bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type: CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot, for
+ * example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
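+ *
+ * For example, an early boot console setup might do something along these
+ * lines (the device type value 0xfd here is illustrative):
+ *
+ *	void __iomem *regs = mips_cdmm_early_probe(0xfd);
+ *	if (IS_ERR(regs))
+ *		return PTR_ERR(regs);
+ *	acsr = readl(regs);	/* ACSR is the first register of the device */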
+ *
+ * Returns: MMIO pointer to device memory. The caller can read the ACSR
+ * register to find more information about the device (such as the
+ * version number or the number of blocks).
+ * May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+ struct mips_cdmm_bus *bus;
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size;
+ int err;
+
+ if (WARN_ON(!dev_type))
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ bus = mips_cdmm_get_bus();
+ err = mips_cdmm_setup(bus);
+ if (err)
+ return IOMEM_ERR_PTR(err);
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Look for a specific device type */
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ if (type == dev_type)
+ return cdmm + drb * CDMM_DRB_SIZE;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ }
+
+ return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev: Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+ kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus: CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size, rev;
+ struct mips_cdmm_device *dev;
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+ int id = 0;
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Discover devices */
+ bus->discovered = true;
+ pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+
+ if (!type)
+ continue;
+
+ pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+ cpu, id, drb, drb * CDMM_DRB_SIZE,
+ (drb + size + 1) * CDMM_DRB_SIZE - 1,
+ type, rev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ break;
+
+ dev->cpu = cpu;
+ dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+ dev->res.end = bus->phys +
+ (drb + size + 1) * CDMM_DRB_SIZE - 1;
+ dev->res.flags = IORESOURCE_MEM;
+ dev->type = type;
+ dev->rev = rev;
+ dev->dev.parent = get_cpu_device(cpu);
+ dev->dev.bus = &mips_cdmm_bustype;
+ dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+ dev->dev.release = mips_cdmm_release;
+
+ dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+ ++id;
+ ret = device_register(&dev->dev);
+ if (ret) {
+ /* The release callback will free dev; never kfree() it directly */
+ put_device(&dev->dev);
+ }
+ }
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. The CPU callbacks need to be called for all devices on that CPU,
+ * so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name) \
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv; \
+ unsigned int cpu = *(unsigned int *)data; \
+ \
+ if (cdev->cpu != cpu || !dev->driver) \
+ return 0; \
+ \
+ cdrv = to_mips_cdmm_driver(dev->driver); \
+ if (!cdrv->_name) \
+ return 0; \
+ return cdrv->_name(cdev); \
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_bus_down() - Tear down the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to call the
+ * CDMM driver cpu_down callback for all devices on that CPU.
+ */
+static long mips_cdmm_bus_down(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_down_helper);
+
+ /*
+ * While the bus is offline, each use of it should reconfigure it, just in
+ * case it is the first use after coming back online.
+ */
+ bus = mips_cdmm_get_bus();
+ if (!IS_ERR(bus))
+ bus->offline = true;
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_bus_up() - Bring up the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used during initialisation and when CPUs are brought online.
+ */
+static long mips_cdmm_bus_up(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ bus = mips_cdmm_get_bus();
+ ret = mips_cdmm_setup(bus);
+ if (ret)
+ return ret;
+
+ /* Bus now set up, so we can drop the offline flag if still set */
+ bus->offline = false;
+
+ if (!bus->discovered)
+ mips_cdmm_bus_discover(bus);
+ else
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_up_helper);
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
+ * @nb: CPU notifier block.
+ * @action: Event that has taken place (CPU_*).
+ * @data: CPU number.
+ *
+ * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
+ * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
+ * devices must be informed. Also, when CPUs come online for the first time,
+ * the devices on the CDMM bus need discovering.
+ *
+ * Returns: NOTIFY_OK if event was used.
+ * NOTIFY_DONE if we didn't care.
+ */
+static int mips_cdmm_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned int cpu = (unsigned int)data;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ work_on_cpu(cpu, mips_cdmm_bus_down, &cpu);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_cdmm_cpu_nb = {
+ .notifier_call = mips_cdmm_cpu_notify,
+};
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+ unsigned int cpu;
+ int ret;
+
+ /* Register the bus */
+ ret = bus_register(&mips_cdmm_bustype);
+ if (ret)
+ return ret;
+
+ /* We want to be notified about new CPUs */
+ ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
+ if (ret) {
+ pr_warn("cdmm: Failed to register CPU notifier\n");
+ goto out;
+ }
+
+ /* Discover devices on CDMM of online CPUs */
+ for_each_online_cpu(cpu)
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+
+ return 0;
+out:
+ bus_unregister(&mips_cdmm_bustype);
+ return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d478ceb69c5f..e43ff53f85a6 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 05abae89262e..a0ef4f75d457 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,6 +15,7 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/kernel.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -51,21 +52,6 @@ static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
/* Divider and scaling helpers */
-/*
- * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
- * unsigned. Note that unlike do_div(), the remainder is discarded
- * and the return value is the quotient (not the remainder).
- */
-u64 do_div_round_closest(u64 dividend, unsigned long divisor)
-{
- u64 result;
-
- result = dividend + ((u64)divisor >> 1);
- (void)do_div(result, divisor);
-
- return result;
-}
-
/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
@@ -87,7 +73,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
combined = (u64)div_value * BILLION + billionths;
combined <<= div->u.s.frac_width;
- return do_div_round_closest(combined, BILLION);
+ return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}
/* The scaled minimum divisor representable by a divider */
@@ -731,7 +717,7 @@ static unsigned long clk_recalc_rate(struct ccu_data *ccu,
scaled_rate = scale_rate(pre_div, parent_rate);
scaled_rate = scale_rate(div, scaled_rate);
scaled_div = divider_read_scaled(ccu, pre_div);
- scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
scaled_div);
} else {
scaled_parent_rate = scale_rate(div, parent_rate);
@@ -743,7 +729,7 @@ static unsigned long clk_recalc_rate(struct ccu_data *ccu,
* rate.
*/
scaled_div = divider_read_scaled(ccu, div);
- result = do_div_round_closest(scaled_parent_rate, scaled_div);
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);
return (unsigned long)result;
}
@@ -790,7 +776,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
scaled_rate = scale_rate(pre_div, parent_rate);
scaled_rate = scale_rate(div, scaled_rate);
scaled_pre_div = divider_read_scaled(ccu, pre_div);
- scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
scaled_pre_div);
} else {
scaled_parent_rate = scale_rate(div, parent_rate);
@@ -802,7 +788,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
* the best we can do.
*/
if (!divider_is_fixed(div)) {
- best_scaled_div = do_div_round_closest(scaled_parent_rate,
+ best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
rate);
min_scaled_div = scaled_div_min(div);
max_scaled_div = scaled_div_max(div);
@@ -815,7 +801,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
}
/* OK, figure out the resulting rate */
- result = do_div_round_closest(scaled_parent_rate, best_scaled_div);
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);
if (scaled_div)
*scaled_div = best_scaled_div;
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 2537b3072910..6849a64baf6d 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -503,7 +503,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
-extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
extern u64 scaled_div_max(struct bcm_clk_div *div);
extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
u32 billionths);
diff --git a/drivers/clk/pistachio/Makefile b/drivers/clk/pistachio/Makefile
new file mode 100644
index 000000000000..f1e151fbef65
--- /dev/null
+++ b/drivers/clk/pistachio/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk.o
+obj-y += clk-pll.o
+obj-y += clk-pistachio.o
diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
new file mode 100644
index 000000000000..8c0fe8828f99
--- /dev/null
+++ b/drivers/clk/pistachio/clk-pistachio.c
@@ -0,0 +1,329 @@
+/*
+ * Pistachio SoC clock controllers
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/pistachio-clk.h>
+
+#include "clk.h"
+
+static struct pistachio_gate pistachio_gates[] __initdata = {
+ GATE(CLK_MIPS, "mips", "mips_div", 0x104, 0),
+ GATE(CLK_AUDIO_IN, "audio_in", "audio_clk_in_gate", 0x104, 1),
+ GATE(CLK_AUDIO, "audio", "audio_div", 0x104, 2),
+ GATE(CLK_I2S, "i2s", "i2s_div", 0x104, 3),
+ GATE(CLK_SPDIF, "spdif", "spdif_div", 0x104, 4),
+ GATE(CLK_AUDIO_DAC, "audio_dac", "audio_dac_div", 0x104, 5),
+ GATE(CLK_RPU_V, "rpu_v", "rpu_v_div", 0x104, 6),
+ GATE(CLK_RPU_L, "rpu_l", "rpu_l_div", 0x104, 7),
+ GATE(CLK_RPU_SLEEP, "rpu_sleep", "rpu_sleep_div", 0x104, 8),
+ GATE(CLK_WIFI_PLL_GATE, "wifi_pll_gate", "wifi_pll_mux", 0x104, 9),
+ GATE(CLK_RPU_CORE, "rpu_core", "rpu_core_div", 0x104, 10),
+ GATE(CLK_WIFI_ADC, "wifi_adc", "wifi_div8_mux", 0x104, 11),
+ GATE(CLK_WIFI_DAC, "wifi_dac", "wifi_div4_mux", 0x104, 12),
+ GATE(CLK_USB_PHY, "usb_phy", "usb_phy_div", 0x104, 13),
+ GATE(CLK_ENET_IN, "enet_in", "enet_clk_in_gate", 0x104, 14),
+ GATE(CLK_ENET, "enet", "enet_div", 0x104, 15),
+ GATE(CLK_UART0, "uart0", "uart0_div", 0x104, 16),
+ GATE(CLK_UART1, "uart1", "uart1_div", 0x104, 17),
+ GATE(CLK_PERIPH_SYS, "periph_sys", "sys_internal_div", 0x104, 18),
+ GATE(CLK_SPI0, "spi0", "spi0_div", 0x104, 19),
+ GATE(CLK_SPI1, "spi1", "spi1_div", 0x104, 20),
+ GATE(CLK_EVENT_TIMER, "event_timer", "event_timer_div", 0x104, 21),
+ GATE(CLK_AUX_ADC_INTERNAL, "aux_adc_internal", "sys_internal_div",
+ 0x104, 22),
+ GATE(CLK_AUX_ADC, "aux_adc", "aux_adc_div", 0x104, 23),
+ GATE(CLK_SD_HOST, "sd_host", "sd_host_div", 0x104, 24),
+ GATE(CLK_BT, "bt", "bt_div", 0x104, 25),
+ GATE(CLK_BT_DIV4, "bt_div4", "bt_div4_div", 0x104, 26),
+ GATE(CLK_BT_DIV8, "bt_div8", "bt_div8_div", 0x104, 27),
+ GATE(CLK_BT_1MHZ, "bt_1mhz", "bt_1mhz_div", 0x104, 28),
+};
+
+static struct pistachio_fixed_factor pistachio_ffs[] __initdata = {
+ FIXED_FACTOR(CLK_WIFI_DIV4, "wifi_div4", "wifi_pll", 4),
+ FIXED_FACTOR(CLK_WIFI_DIV8, "wifi_div8", "wifi_pll", 8),
+};
+
+static struct pistachio_div pistachio_divs[] __initdata = {
+ DIV(CLK_MIPS_INTERNAL_DIV, "mips_internal_div", "mips_pll_mux",
+ 0x204, 2),
+ DIV(CLK_MIPS_DIV, "mips_div", "mips_internal_div", 0x208, 8),
+ DIV_F(CLK_AUDIO_DIV, "audio_div", "audio_mux",
+ 0x20c, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_I2S_DIV, "i2s_div", "audio_pll_mux",
+ 0x210, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_SPDIF_DIV, "spdif_div", "audio_pll_mux",
+ 0x214, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_AUDIO_DAC_DIV, "audio_dac_div", "audio_pll_mux",
+ 0x218, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(CLK_RPU_V_DIV, "rpu_v_div", "rpu_v_pll_mux", 0x21c, 2),
+ DIV(CLK_RPU_L_DIV, "rpu_l_div", "rpu_l_mux", 0x220, 2),
+ DIV(CLK_RPU_SLEEP_DIV, "rpu_sleep_div", "xtal", 0x224, 10),
+ DIV(CLK_RPU_CORE_DIV, "rpu_core_div", "rpu_core_mux", 0x228, 3),
+ DIV(CLK_USB_PHY_DIV, "usb_phy_div", "sys_internal_div", 0x22c, 6),
+ DIV(CLK_ENET_DIV, "enet_div", "enet_mux", 0x230, 6),
+ DIV_F(CLK_UART0_INTERNAL_DIV, "uart0_internal_div", "sys_pll_mux",
+ 0x234, 3, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART0_DIV, "uart0_div", "uart0_internal_div", 0x238, 10,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART1_INTERNAL_DIV, "uart1_internal_div", "sys_pll_mux",
+ 0x23c, 3, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART1_DIV, "uart1_div", "uart1_internal_div", 0x240, 10,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(CLK_SYS_INTERNAL_DIV, "sys_internal_div", "sys_pll_mux", 0x244, 3),
+ DIV(CLK_SPI0_INTERNAL_DIV, "spi0_internal_div", "sys_pll_mux",
+ 0x248, 3),
+ DIV(CLK_SPI0_DIV, "spi0_div", "spi0_internal_div", 0x24c, 7),
+ DIV(CLK_SPI1_INTERNAL_DIV, "spi1_internal_div", "sys_pll_mux",
+ 0x250, 3),
+ DIV(CLK_SPI1_DIV, "spi1_div", "spi1_internal_div", 0x254, 7),
+ DIV(CLK_EVENT_TIMER_INTERNAL_DIV, "event_timer_internal_div",
+ "event_timer_mux", 0x258, 3),
+ DIV(CLK_EVENT_TIMER_DIV, "event_timer_div", "event_timer_internal_div",
+ 0x25c, 12),
+ DIV(CLK_AUX_ADC_INTERNAL_DIV, "aux_adc_internal_div",
+ "aux_adc_internal", 0x260, 3),
+ DIV(CLK_AUX_ADC_DIV, "aux_adc_div", "aux_adc_internal_div", 0x264, 10),
+ DIV(CLK_SD_HOST_DIV, "sd_host_div", "sd_host_mux", 0x268, 6),
+ DIV(CLK_BT_DIV, "bt_div", "bt_pll_mux", 0x26c, 6),
+ DIV(CLK_BT_DIV4_DIV, "bt_div4_div", "bt_pll_mux", 0x270, 6),
+ DIV(CLK_BT_DIV8_DIV, "bt_div8_div", "bt_pll_mux", 0x274, 6),
+ DIV(CLK_BT_1MHZ_INTERNAL_DIV, "bt_1mhz_internal_div", "bt_pll_mux",
+ 0x278, 3),
+ DIV(CLK_BT_1MHZ_DIV, "bt_1mhz_div", "bt_1mhz_internal_div", 0x27c, 10),
+};
+
+PNAME(mux_xtal_audio_refclk) = { "xtal", "audio_clk_in_gate" };
+PNAME(mux_xtal_mips) = { "xtal", "mips_pll" };
+PNAME(mux_xtal_audio) = { "xtal", "audio_pll", "audio_in" };
+PNAME(mux_audio_debug) = { "audio_pll_mux", "debug_mux" };
+PNAME(mux_xtal_rpu_v) = { "xtal", "rpu_v_pll" };
+PNAME(mux_xtal_rpu_l) = { "xtal", "rpu_l_pll" };
+PNAME(mux_rpu_l_mips) = { "rpu_l_pll_mux", "mips_pll_mux" };
+PNAME(mux_xtal_wifi) = { "xtal", "wifi_pll" };
+PNAME(mux_xtal_wifi_div4) = { "xtal", "wifi_div4" };
+PNAME(mux_xtal_wifi_div8) = { "xtal", "wifi_div8" };
+PNAME(mux_wifi_div4_rpu_l) = { "wifi_pll_gate", "wifi_div4_mux",
+ "rpu_l_pll_mux" };
+PNAME(mux_xtal_sys) = { "xtal", "sys_pll" };
+PNAME(mux_sys_enet) = { "sys_internal_div", "enet_in" };
+PNAME(mux_audio_sys) = { "audio_pll_mux", "sys_internal_div" };
+PNAME(mux_sys_bt) = { "sys_internal_div", "bt_pll_mux" };
+PNAME(mux_xtal_bt) = { "xtal", "bt_pll" };
+
+static struct pistachio_mux pistachio_muxes[] __initdata = {
+ MUX(CLK_AUDIO_REF_MUX, "audio_refclk_mux", mux_xtal_audio_refclk,
+ 0x200, 0),
+ MUX(CLK_MIPS_PLL_MUX, "mips_pll_mux", mux_xtal_mips, 0x200, 1),
+ MUX(CLK_AUDIO_PLL_MUX, "audio_pll_mux", mux_xtal_audio, 0x200, 2),
+ MUX(CLK_AUDIO_MUX, "audio_mux", mux_audio_debug, 0x200, 4),
+ MUX(CLK_RPU_V_PLL_MUX, "rpu_v_pll_mux", mux_xtal_rpu_v, 0x200, 5),
+ MUX(CLK_RPU_L_PLL_MUX, "rpu_l_pll_mux", mux_xtal_rpu_l, 0x200, 6),
+ MUX(CLK_RPU_L_MUX, "rpu_l_mux", mux_rpu_l_mips, 0x200, 7),
+ MUX(CLK_WIFI_PLL_MUX, "wifi_pll_mux", mux_xtal_wifi, 0x200, 8),
+ MUX(CLK_WIFI_DIV4_MUX, "wifi_div4_mux", mux_xtal_wifi_div4, 0x200, 9),
+ MUX(CLK_WIFI_DIV8_MUX, "wifi_div8_mux", mux_xtal_wifi_div8, 0x200, 10),
+ MUX(CLK_RPU_CORE_MUX, "rpu_core_mux", mux_wifi_div4_rpu_l, 0x200, 11),
+ MUX(CLK_SYS_PLL_MUX, "sys_pll_mux", mux_xtal_sys, 0x200, 13),
+ MUX(CLK_ENET_MUX, "enet_mux", mux_sys_enet, 0x200, 14),
+ MUX(CLK_EVENT_TIMER_MUX, "event_timer_mux", mux_audio_sys, 0x200, 15),
+ MUX(CLK_SD_HOST_MUX, "sd_host_mux", mux_sys_bt, 0x200, 16),
+ MUX(CLK_BT_PLL_MUX, "bt_pll_mux", mux_xtal_bt, 0x200, 17),
+};
+
+static struct pistachio_pll pistachio_plls[] __initdata = {
+ PLL_FIXED(CLK_MIPS_PLL, "mips_pll", "xtal", PLL_GF40LP_LAINT, 0x0),
+ PLL_FIXED(CLK_AUDIO_PLL, "audio_pll", "audio_refclk_mux",
+ PLL_GF40LP_FRAC, 0xc),
+ PLL_FIXED(CLK_RPU_V_PLL, "rpu_v_pll", "xtal", PLL_GF40LP_LAINT, 0x20),
+ PLL_FIXED(CLK_RPU_L_PLL, "rpu_l_pll", "xtal", PLL_GF40LP_LAINT, 0x2c),
+ PLL_FIXED(CLK_SYS_PLL, "sys_pll", "xtal", PLL_GF40LP_FRAC, 0x38),
+ PLL_FIXED(CLK_WIFI_PLL, "wifi_pll", "xtal", PLL_GF40LP_FRAC, 0x4c),
+ PLL_FIXED(CLK_BT_PLL, "bt_pll", "xtal", PLL_GF40LP_LAINT, 0x60),
+};
+
+PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
+ "rpu_l_pll_mux", "sys_pll_mux",
+ "wifi_pll_mux", "bt_pll_mux" };
+static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
+
+static unsigned int pistachio_critical_clks[] __initdata = {
+ CLK_MIPS,
+ CLK_PERIPH_SYS,
+};
+
+static void __init pistachio_clk_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+ struct clk *debug_clk;
+
+ p = pistachio_clk_alloc_provider(np, CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_pll(p, pistachio_plls,
+ ARRAY_SIZE(pistachio_plls));
+ pistachio_clk_register_mux(p, pistachio_muxes,
+ ARRAY_SIZE(pistachio_muxes));
+ pistachio_clk_register_div(p, pistachio_divs,
+ ARRAY_SIZE(pistachio_divs));
+ pistachio_clk_register_fixed_factor(p, pistachio_ffs,
+ ARRAY_SIZE(pistachio_ffs));
+ pistachio_clk_register_gate(p, pistachio_gates,
+ ARRAY_SIZE(pistachio_gates));
+
+ debug_clk = clk_register_mux_table(NULL, "debug_mux", mux_debug,
+ ARRAY_SIZE(mux_debug),
+ CLK_SET_RATE_NO_REPARENT,
+ p->base + 0x200, 18, 0x1f, 0,
+ mux_debug_idx, NULL);
+ p->clk_data.clks[CLK_DEBUG_MUX] = debug_clk;
+
+ pistachio_clk_register_provider(p);
+
+ pistachio_clk_force_enable(p, pistachio_critical_clks,
+ ARRAY_SIZE(pistachio_critical_clks));
+}
+CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
+
+static struct pistachio_gate pistachio_periph_gates[] __initdata = {
+ GATE(PERIPH_CLK_SYS, "sys", "periph_sys", 0x100, 0),
+ GATE(PERIPH_CLK_SYS_BUS, "bus_sys", "periph_sys", 0x100, 1),
+ GATE(PERIPH_CLK_DDR, "ddr", "periph_sys", 0x100, 2),
+ GATE(PERIPH_CLK_ROM, "rom", "rom_div", 0x100, 3),
+ GATE(PERIPH_CLK_COUNTER_FAST, "counter_fast", "counter_fast_div",
+ 0x100, 4),
+ GATE(PERIPH_CLK_COUNTER_SLOW, "counter_slow", "counter_slow_div",
+ 0x100, 5),
+ GATE(PERIPH_CLK_IR, "ir", "ir_div", 0x100, 6),
+ GATE(PERIPH_CLK_WD, "wd", "wd_div", 0x100, 7),
+ GATE(PERIPH_CLK_PDM, "pdm", "pdm_div", 0x100, 8),
+ GATE(PERIPH_CLK_PWM, "pwm", "pwm_div", 0x100, 9),
+ GATE(PERIPH_CLK_I2C0, "i2c0", "i2c0_div", 0x100, 10),
+ GATE(PERIPH_CLK_I2C1, "i2c1", "i2c1_div", 0x100, 11),
+ GATE(PERIPH_CLK_I2C2, "i2c2", "i2c2_div", 0x100, 12),
+ GATE(PERIPH_CLK_I2C3, "i2c3", "i2c3_div", 0x100, 13),
+};
+
+static struct pistachio_div pistachio_periph_divs[] __initdata = {
+ DIV(PERIPH_CLK_ROM_DIV, "rom_div", "periph_sys", 0x10c, 7),
+ DIV(PERIPH_CLK_COUNTER_FAST_DIV, "counter_fast_div", "periph_sys",
+ 0x110, 7),
+ DIV(PERIPH_CLK_COUNTER_SLOW_PRE_DIV, "counter_slow_pre_div",
+ "periph_sys", 0x114, 7),
+ DIV(PERIPH_CLK_COUNTER_SLOW_DIV, "counter_slow_div",
+ "counter_slow_pre_div", 0x118, 7),
+ DIV_F(PERIPH_CLK_IR_PRE_DIV, "ir_pre_div", "periph_sys", 0x11c, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_IR_DIV, "ir_div", "ir_pre_div", 0x120, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_WD_PRE_DIV, "wd_pre_div", "periph_sys", 0x124, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_WD_DIV, "wd_div", "wd_pre_div", 0x128, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(PERIPH_CLK_PDM_PRE_DIV, "pdm_pre_div", "periph_sys", 0x12c, 7),
+ DIV(PERIPH_CLK_PDM_DIV, "pdm_div", "pdm_pre_div", 0x130, 7),
+ DIV(PERIPH_CLK_PWM_PRE_DIV, "pwm_pre_div", "periph_sys", 0x134, 7),
+ DIV(PERIPH_CLK_PWM_DIV, "pwm_div", "pwm_pre_div", 0x138, 7),
+ DIV(PERIPH_CLK_I2C0_PRE_DIV, "i2c0_pre_div", "periph_sys", 0x13c, 7),
+ DIV(PERIPH_CLK_I2C0_DIV, "i2c0_div", "i2c0_pre_div", 0x140, 7),
+ DIV(PERIPH_CLK_I2C1_PRE_DIV, "i2c1_pre_div", "periph_sys", 0x144, 7),
+ DIV(PERIPH_CLK_I2C1_DIV, "i2c1_div", "i2c1_pre_div", 0x148, 7),
+ DIV(PERIPH_CLK_I2C2_PRE_DIV, "i2c2_pre_div", "periph_sys", 0x14c, 7),
+ DIV(PERIPH_CLK_I2C2_DIV, "i2c2_div", "i2c2_pre_div", 0x150, 7),
+ DIV(PERIPH_CLK_I2C3_PRE_DIV, "i2c3_pre_div", "periph_sys", 0x154, 7),
+ DIV(PERIPH_CLK_I2C3_DIV, "i2c3_div", "i2c3_pre_div", 0x158, 7),
+};
+
+static void __init pistachio_clk_periph_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, PERIPH_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_div(p, pistachio_periph_divs,
+ ARRAY_SIZE(pistachio_periph_divs));
+ pistachio_clk_register_gate(p, pistachio_periph_gates,
+ ARRAY_SIZE(pistachio_periph_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
+ pistachio_clk_periph_init);
+
+static struct pistachio_gate pistachio_sys_gates[] __initdata = {
+ GATE(SYS_CLK_I2C0, "i2c0_sys", "sys", 0x8, 0),
+ GATE(SYS_CLK_I2C1, "i2c1_sys", "sys", 0x8, 1),
+ GATE(SYS_CLK_I2C2, "i2c2_sys", "sys", 0x8, 2),
+ GATE(SYS_CLK_I2C3, "i2c3_sys", "sys", 0x8, 3),
+ GATE(SYS_CLK_I2S_IN, "i2s_in_sys", "sys", 0x8, 4),
+ GATE(SYS_CLK_PAUD_OUT, "paud_out_sys", "sys", 0x8, 5),
+ GATE(SYS_CLK_SPDIF_OUT, "spdif_out_sys", "sys", 0x8, 6),
+ GATE(SYS_CLK_SPI0_MASTER, "spi0_master_sys", "sys", 0x8, 7),
+ GATE(SYS_CLK_SPI0_SLAVE, "spi0_slave_sys", "sys", 0x8, 8),
+ GATE(SYS_CLK_PWM, "pwm_sys", "sys", 0x8, 9),
+ GATE(SYS_CLK_UART0, "uart0_sys", "sys", 0x8, 10),
+ GATE(SYS_CLK_UART1, "uart1_sys", "sys", 0x8, 11),
+ GATE(SYS_CLK_SPI1, "spi1_sys", "sys", 0x8, 12),
+ GATE(SYS_CLK_MDC, "mdc_sys", "sys", 0x8, 13),
+ GATE(SYS_CLK_SD_HOST, "sd_host_sys", "sys", 0x8, 14),
+ GATE(SYS_CLK_ENET, "enet_sys", "sys", 0x8, 15),
+ GATE(SYS_CLK_IR, "ir_sys", "sys", 0x8, 16),
+ GATE(SYS_CLK_WD, "wd_sys", "sys", 0x8, 17),
+ GATE(SYS_CLK_TIMER, "timer_sys", "sys", 0x8, 18),
+ GATE(SYS_CLK_I2S_OUT, "i2s_out_sys", "sys", 0x8, 24),
+ GATE(SYS_CLK_SPDIF_IN, "spdif_in_sys", "sys", 0x8, 25),
+ GATE(SYS_CLK_EVENT_TIMER, "event_timer_sys", "sys", 0x8, 26),
+ GATE(SYS_CLK_HASH, "hash_sys", "sys", 0x8, 27),
+};
+
+static void __init pistachio_cr_periph_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, SYS_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_gate(p, pistachio_sys_gates,
+ ARRAY_SIZE(pistachio_sys_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_cr_periph, "img,pistachio-cr-periph",
+ pistachio_cr_periph_init);
+
+static struct pistachio_gate pistachio_ext_gates[] __initdata = {
+ GATE(EXT_CLK_ENET_IN, "enet_clk_in_gate", "enet_clk_in", 0x58, 5),
+ GATE(EXT_CLK_AUDIO_IN, "audio_clk_in_gate", "audio_clk_in", 0x58, 8)
+};
+
+static void __init pistachio_cr_top_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, EXT_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_gate(p, pistachio_ext_gates,
+ ARRAY_SIZE(pistachio_ext_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_cr_top, "img,pistachio-cr-top",
+ pistachio_cr_top_init);
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
new file mode 100644
index 000000000000..de537560bf70
--- /dev/null
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define PLL_STATUS 0x0
+#define PLL_STATUS_LOCK BIT(0)
+
+#define PLL_CTRL1 0x4
+#define PLL_CTRL1_REFDIV_SHIFT 0
+#define PLL_CTRL1_REFDIV_MASK 0x3f
+#define PLL_CTRL1_FBDIV_SHIFT 6
+#define PLL_CTRL1_FBDIV_MASK 0xfff
+#define PLL_INT_CTRL1_POSTDIV1_SHIFT 18
+#define PLL_INT_CTRL1_POSTDIV1_MASK 0x7
+#define PLL_INT_CTRL1_POSTDIV2_SHIFT 21
+#define PLL_INT_CTRL1_POSTDIV2_MASK 0x7
+#define PLL_INT_CTRL1_PD BIT(24)
+#define PLL_INT_CTRL1_DSMPD BIT(25)
+#define PLL_INT_CTRL1_FOUTPOSTDIVPD BIT(26)
+#define PLL_INT_CTRL1_FOUTVCOPD BIT(27)
+
+#define PLL_CTRL2 0x8
+#define PLL_FRAC_CTRL2_FRAC_SHIFT 0
+#define PLL_FRAC_CTRL2_FRAC_MASK 0xffffff
+#define PLL_FRAC_CTRL2_POSTDIV1_SHIFT 24
+#define PLL_FRAC_CTRL2_POSTDIV1_MASK 0x7
+#define PLL_FRAC_CTRL2_POSTDIV2_SHIFT 27
+#define PLL_FRAC_CTRL2_POSTDIV2_MASK 0x7
+#define PLL_INT_CTRL2_BYPASS BIT(28)
+
+#define PLL_CTRL3 0xc
+#define PLL_FRAC_CTRL3_PD BIT(0)
+#define PLL_FRAC_CTRL3_DACPD BIT(1)
+#define PLL_FRAC_CTRL3_DSMPD BIT(2)
+#define PLL_FRAC_CTRL3_FOUTPOSTDIVPD BIT(3)
+#define PLL_FRAC_CTRL3_FOUT4PHASEPD BIT(4)
+#define PLL_FRAC_CTRL3_FOUTVCOPD BIT(5)
+
+#define PLL_CTRL4 0x10
+#define PLL_FRAC_CTRL4_BYPASS BIT(28)
+
+struct pistachio_clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct pistachio_pll_rate_table *rates;
+ unsigned int nr_rates;
+};
+
+static inline u32 pll_readl(struct pistachio_clk_pll *pll, u32 reg)
+{
+ return readl(pll->base + reg);
+}
+
+static inline void pll_writel(struct pistachio_clk_pll *pll, u32 val, u32 reg)
+{
+ writel(val, pll->base + reg);
+}
+
+static inline u32 do_div_round_closest(u64 dividend, u32 divisor)
+{
+ dividend += divisor / 2;
+ do_div(dividend, divisor);
+
+ return dividend;
+}
+
+static inline struct pistachio_clk_pll *to_pistachio_pll(struct clk_hw *hw)
+{
+ return container_of(hw, struct pistachio_clk_pll, hw);
+}
+
+static struct pistachio_pll_rate_table *
+pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
+ unsigned long fout)
+{
+ unsigned int i;
+
+ for (i = 0; i < pll->nr_rates; i++) {
+ if (pll->rates[i].fref == fref && pll->rates[i].fout == fout)
+ return &pll->rates[i];
+ }
+
+ return NULL;
+}
+
+static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ unsigned int i;
+
+ for (i = 0; i < pll->nr_rates; i++) {
+ if (i > 0 && pll->rates[i].fref == *parent_rate &&
+ pll->rates[i].fout <= rate)
+ return pll->rates[i - 1].fout;
+ }
+
+ return pll->rates[0].fout;
+}
+
+static int pll_gf40lp_frac_enable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3);
+ val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
+ PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+ PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
+ pll_writel(pll, val, PLL_CTRL3);
+
+ val = pll_readl(pll, PLL_CTRL4);
+ val &= ~PLL_FRAC_CTRL4_BYPASS;
+ pll_writel(pll, val, PLL_CTRL4);
+
+ return 0;
+}
+
+static void pll_gf40lp_frac_disable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3);
+ val |= PLL_FRAC_CTRL3_PD;
+ pll_writel(pll, val, PLL_CTRL3);
+}
+
+static int pll_gf40lp_frac_is_enabled(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+
+ return !(pll_readl(pll, PLL_CTRL3) & PLL_FRAC_CTRL3_PD);
+}
+
+static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ struct pistachio_pll_rate_table *params;
+ bool was_enabled;
+ u32 val;
+
+ params = pll_get_params(pll, parent_rate, rate);
+ if (!params)
+ return -EINVAL;
+
+ was_enabled = pll_gf40lp_frac_is_enabled(hw);
+ if (!was_enabled)
+ pll_gf40lp_frac_enable(hw);
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
+ (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT));
+ val |= (params->refdiv << PLL_CTRL1_REFDIV_SHIFT) |
+ (params->fbdiv << PLL_CTRL1_FBDIV_SHIFT);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ val = pll_readl(pll, PLL_CTRL2);
+ val &= ~((PLL_FRAC_CTRL2_FRAC_MASK << PLL_FRAC_CTRL2_FRAC_SHIFT) |
+ (PLL_FRAC_CTRL2_POSTDIV1_MASK <<
+ PLL_FRAC_CTRL2_POSTDIV1_SHIFT) |
+ (PLL_FRAC_CTRL2_POSTDIV2_MASK <<
+ PLL_FRAC_CTRL2_POSTDIV2_SHIFT));
+ val |= (params->frac << PLL_FRAC_CTRL2_FRAC_SHIFT) |
+ (params->postdiv1 << PLL_FRAC_CTRL2_POSTDIV1_SHIFT) |
+ (params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT);
+ pll_writel(pll, val, PLL_CTRL2);
+
+ while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
+ cpu_relax();
+
+ if (!was_enabled)
+ pll_gf40lp_frac_disable(hw);
+
+ return 0;
+}
+
+static unsigned long pll_gf40lp_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val, prediv, fbdiv, frac, postdiv1, postdiv2;
+ u64 rate = parent_rate;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
+ fbdiv = (val >> PLL_CTRL1_FBDIV_SHIFT) & PLL_CTRL1_FBDIV_MASK;
+
+ val = pll_readl(pll, PLL_CTRL2);
+ postdiv1 = (val >> PLL_FRAC_CTRL2_POSTDIV1_SHIFT) &
+ PLL_FRAC_CTRL2_POSTDIV1_MASK;
+ postdiv2 = (val >> PLL_FRAC_CTRL2_POSTDIV2_SHIFT) &
+ PLL_FRAC_CTRL2_POSTDIV2_MASK;
+ frac = (val >> PLL_FRAC_CTRL2_FRAC_SHIFT) & PLL_FRAC_CTRL2_FRAC_MASK;
+
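+ /* Fout = Fref * (fbdiv + frac / 2^24) / (refdiv * postdiv1 * postdiv2) */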
+ rate *= (fbdiv << 24) + frac;
+ rate = do_div_round_closest(rate, (prediv * postdiv1 * postdiv2) << 24);
+
+ return rate;
+}
+
+static struct clk_ops pll_gf40lp_frac_ops = {
+ .enable = pll_gf40lp_frac_enable,
+ .disable = pll_gf40lp_frac_disable,
+ .is_enabled = pll_gf40lp_frac_is_enabled,
+ .recalc_rate = pll_gf40lp_frac_recalc_rate,
+ .round_rate = pll_round_rate,
+ .set_rate = pll_gf40lp_frac_set_rate,
+};
+
+static struct clk_ops pll_gf40lp_frac_fixed_ops = {
+ .enable = pll_gf40lp_frac_enable,
+ .disable = pll_gf40lp_frac_disable,
+ .is_enabled = pll_gf40lp_frac_is_enabled,
+ .recalc_rate = pll_gf40lp_frac_recalc_rate,
+};
+
+static int pll_gf40lp_laint_enable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
+ PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ val = pll_readl(pll, PLL_CTRL2);
+ val &= ~PLL_INT_CTRL2_BYPASS;
+ pll_writel(pll, val, PLL_CTRL2);
+
+ return 0;
+}
+
+static void pll_gf40lp_laint_disable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val |= PLL_INT_CTRL1_PD;
+ pll_writel(pll, val, PLL_CTRL1);
+}
+
+static int pll_gf40lp_laint_is_enabled(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+
+ return !(pll_readl(pll, PLL_CTRL1) & PLL_INT_CTRL1_PD);
+}
+
+static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ struct pistachio_pll_rate_table *params;
+ bool was_enabled;
+ u32 val;
+
+ params = pll_get_params(pll, parent_rate, rate);
+ if (!params)
+ return -EINVAL;
+
+ was_enabled = pll_gf40lp_laint_is_enabled(hw);
+ if (!was_enabled)
+ pll_gf40lp_laint_enable(hw);
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
+ (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT) |
+ (PLL_INT_CTRL1_POSTDIV1_MASK << PLL_INT_CTRL1_POSTDIV1_SHIFT) |
+ (PLL_INT_CTRL1_POSTDIV2_MASK << PLL_INT_CTRL1_POSTDIV2_SHIFT));
+ val |= (params->refdiv << PLL_CTRL1_REFDIV_SHIFT) |
+ (params->fbdiv << PLL_CTRL1_FBDIV_SHIFT) |
+ (params->postdiv1 << PLL_INT_CTRL1_POSTDIV1_SHIFT) |
+ (params->postdiv2 << PLL_INT_CTRL1_POSTDIV2_SHIFT);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
+ cpu_relax();
+
+ if (!was_enabled)
+ pll_gf40lp_laint_disable(hw);
+
+ return 0;
+}
+
+static unsigned long pll_gf40lp_laint_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val, prediv, fbdiv, postdiv1, postdiv2;
+ u64 rate = parent_rate;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
+ fbdiv = (val >> PLL_CTRL1_FBDIV_SHIFT) & PLL_CTRL1_FBDIV_MASK;
+ postdiv1 = (val >> PLL_INT_CTRL1_POSTDIV1_SHIFT) &
+ PLL_INT_CTRL1_POSTDIV1_MASK;
+ postdiv2 = (val >> PLL_INT_CTRL1_POSTDIV2_SHIFT) &
+ PLL_INT_CTRL1_POSTDIV2_MASK;
+
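+ /* Fout = Fref * fbdiv / (refdiv * postdiv1 * postdiv2) */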
+ rate *= fbdiv;
+ rate = do_div_round_closest(rate, prediv * postdiv1 * postdiv2);
+
+ return rate;
+}
+
+static struct clk_ops pll_gf40lp_laint_ops = {
+ .enable = pll_gf40lp_laint_enable,
+ .disable = pll_gf40lp_laint_disable,
+ .is_enabled = pll_gf40lp_laint_is_enabled,
+ .recalc_rate = pll_gf40lp_laint_recalc_rate,
+ .round_rate = pll_round_rate,
+ .set_rate = pll_gf40lp_laint_set_rate,
+};
+
+static struct clk_ops pll_gf40lp_laint_fixed_ops = {
+ .enable = pll_gf40lp_laint_enable,
+ .disable = pll_gf40lp_laint_disable,
+ .is_enabled = pll_gf40lp_laint_is_enabled,
+ .recalc_rate = pll_gf40lp_laint_recalc_rate,
+};
+
+static struct clk *pll_register(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *base,
+ enum pistachio_pll_type type,
+ struct pistachio_pll_rate_table *rates,
+ unsigned int nr_rates)
+{
+ struct pistachio_clk_pll *pll;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags | CLK_GET_RATE_NOCACHE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ switch (type) {
+ case PLL_GF40LP_FRAC:
+ if (rates)
+ init.ops = &pll_gf40lp_frac_ops;
+ else
+ init.ops = &pll_gf40lp_frac_fixed_ops;
+ break;
+ case PLL_GF40LP_LAINT:
+ if (rates)
+ init.ops = &pll_gf40lp_laint_ops;
+ else
+ init.ops = &pll_gf40lp_laint_fixed_ops;
+ break;
+ default:
+ pr_err("Unrecognized PLL type %u\n", type);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll->hw.init = &init;
+ pll->base = base;
+ pll->rates = rates;
+ pll->nr_rates = nr_rates;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
+ struct pistachio_pll *pll,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = pll_register(pll[i].name, pll[i].parent,
+ 0, p->base + pll[i].reg_base,
+ pll[i].type, pll[i].rates,
+ pll[i].nr_rates);
+ p->clk_data.clks[pll[i].id] = clk;
+ }
+}
diff --git a/drivers/clk/pistachio/clk.c b/drivers/clk/pistachio/clk.c
new file mode 100644
index 000000000000..85faa83e1bd7
--- /dev/null
+++ b/drivers/clk/pistachio/clk.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+struct pistachio_clk_provider *
+pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks)
+{
+ struct pistachio_clk_provider *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ p->clk_data.clks = kcalloc(num_clks, sizeof(struct clk *), GFP_KERNEL);
+ if (!p->clk_data.clks)
+ goto free_provider;
+ p->clk_data.clk_num = num_clks;
+ p->node = node;
+ p->base = of_iomap(node, 0);
+ if (!p->base) {
+ pr_err("Failed to map clock provider registers\n");
+ goto free_clks;
+ }
+
+ return p;
+
+free_clks:
+ kfree(p->clk_data.clks);
+free_provider:
+ kfree(p);
+ return NULL;
+}
+
+void pistachio_clk_register_provider(struct pistachio_clk_provider *p)
+{
+ unsigned int i;
+
+ for (i = 0; i < p->clk_data.clk_num; i++) {
+ if (IS_ERR(p->clk_data.clks[i]))
+ pr_warn("Failed to register clock %d: %ld\n", i,
+ PTR_ERR(p->clk_data.clks[i]));
+ }
+
+ of_clk_add_provider(p->node, of_clk_src_onecell_get, &p->clk_data);
+}
+
+void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
+ struct pistachio_gate *gate,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_gate(NULL, gate[i].name, gate[i].parent,
+ CLK_SET_RATE_PARENT,
+ p->base + gate[i].reg, gate[i].shift,
+ 0, NULL);
+ p->clk_data.clks[gate[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
+ struct pistachio_mux *mux,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_mux(NULL, mux[i].name, mux[i].parents,
+ mux[i].num_parents,
+ CLK_SET_RATE_NO_REPARENT,
+ p->base + mux[i].reg, mux[i].shift,
+ get_count_order(mux[i].num_parents),
+ 0, NULL);
+ p->clk_data.clks[mux[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_div(struct pistachio_clk_provider *p,
+ struct pistachio_div *div,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_divider(NULL, div[i].name, div[i].parent,
+ 0, p->base + div[i].reg, 0,
+ div[i].width, div[i].div_flags,
+ NULL);
+ p->clk_data.clks[div[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
+ struct pistachio_fixed_factor *ff,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_fixed_factor(NULL, ff[i].name, ff[i].parent,
+ 0, 1, ff[i].div);
+ p->clk_data.clks[ff[i].id] = clk;
+ }
+}
+
+void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
+ unsigned int *clk_ids, unsigned int num)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ struct clk *clk = p->clk_data.clks[clk_ids[i]];
+
+ if (IS_ERR(clk))
+ continue;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ pr_err("Failed to enable clock %s: %d\n",
+ __clk_get_name(clk), err);
+ }
+}
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
new file mode 100644
index 000000000000..52fabbc24624
--- /dev/null
+++ b/drivers/clk/pistachio/clk.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __PISTACHIO_CLK_H
+#define __PISTACHIO_CLK_H
+
+#include <linux/clk-provider.h>
+
+struct pistachio_gate {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int shift;
+ const char *name;
+ const char *parent;
+};
+
+#define GATE(_id, _name, _pname, _reg, _shift) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .shift = _shift, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_mux {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int shift;
+ unsigned int num_parents;
+ const char *name;
+ const char **parents;
+};
+
+#define PNAME(x) static const char *x[] __initconst
+
+#define MUX(_id, _name, _pnames, _reg, _shift) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .shift = _shift, \
+ .name = _name, \
+ .parents = _pnames, \
+ .num_parents = ARRAY_SIZE(_pnames) \
+ }
+
+
+struct pistachio_div {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int width;
+ unsigned int div_flags;
+ const char *name;
+ const char *parent;
+};
+
+#define DIV(_id, _name, _pname, _reg, _width) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .width = _width, \
+ .div_flags = 0, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+#define DIV_F(_id, _name, _pname, _reg, _width, _div_flags) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .width = _width, \
+ .div_flags = _div_flags, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_fixed_factor {
+ unsigned int id;
+ unsigned int div;
+ const char *name;
+ const char *parent;
+};
+
+#define FIXED_FACTOR(_id, _name, _pname, _div) \
+ { \
+ .id = _id, \
+ .div = _div, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_pll_rate_table {
+ unsigned long fref;
+ unsigned long fout;
+ unsigned int refdiv;
+ unsigned int fbdiv;
+ unsigned int postdiv1;
+ unsigned int postdiv2;
+ unsigned int frac;
+};
+
+enum pistachio_pll_type {
+ PLL_GF40LP_LAINT,
+ PLL_GF40LP_FRAC,
+};
+
+struct pistachio_pll {
+ unsigned int id;
+ unsigned long reg_base;
+ enum pistachio_pll_type type;
+ struct pistachio_pll_rate_table *rates;
+ unsigned int nr_rates;
+ const char *name;
+ const char *parent;
+};
+
+#define PLL(_id, _name, _pname, _type, _reg, _rates) \
+ { \
+ .id = _id, \
+ .reg_base = _reg, \
+ .type = _type, \
+ .rates = _rates, \
+ .nr_rates = ARRAY_SIZE(_rates), \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+#define PLL_FIXED(_id, _name, _pname, _type, _reg) \
+ { \
+ .id = _id, \
+ .reg_base = _reg, \
+ .type = _type, \
+ .rates = NULL, \
+ .nr_rates = 0, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_clk_provider {
+ struct device_node *node;
+ void __iomem *base;
+ struct clk_onecell_data clk_data;
+};
+
+extern struct pistachio_clk_provider *
+pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks);
+extern void pistachio_clk_register_provider(struct pistachio_clk_provider *p);
+
+extern void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
+ struct pistachio_gate *gate,
+ unsigned int num);
+extern void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
+ struct pistachio_mux *mux,
+ unsigned int num);
+extern void pistachio_clk_register_div(struct pistachio_clk_provider *p,
+ struct pistachio_div *div,
+ unsigned int num);
+extern void
+pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
+ struct pistachio_fixed_factor *ff,
+ unsigned int num);
+extern void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
+ struct pistachio_pll *pll,
+ unsigned int num);
+
+extern void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
+ unsigned int *clk_ids, unsigned int num);
+
+#endif
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a0b036ccb118..b4ac7cfae441 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -233,7 +233,7 @@ config CLKSRC_QCOM
config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source"
- depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 3bd31b1321f6..b81ed1a5342d 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
@@ -133,6 +134,9 @@ static void __init __gic_clocksource_init(void)
clocksource_register_hz(&gic_clocksource, gic_frequency);
gic_clockevent_init();
+
+ /* And finally start the counter */
+ gic_start_count();
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -146,11 +150,18 @@ void __init gic_clocksource_init(unsigned int frequency)
static void __init gic_clocksource_of_init(struct device_node *node)
{
+ struct clk *clk;
+
if (WARN_ON(!gic_present || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")))
return;
- if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) {
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ gic_frequency = clk_get_rate(clk);
+ clk_put(clk);
+ } else if (of_property_read_u32(node, "clock-frequency",
+ &gic_frequency)) {
pr_err("GIC frequency not specified.\n");
return;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 40580794e23d..b8a5fa15ca24 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -190,12 +190,6 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
-/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
-static u64 div_round64(u64 dividend, u32 divisor)
-{
- return div_u64(dividend + (divisor / 2), divisor);
-}
-
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
@@ -317,7 +311,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = div_round64((uint64_t)data->next_timer_us *
+ data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eef79ccd0b7c..ba243db35840 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -36,9 +36,6 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-
/**
* _wait_for - magic (register) wait macro
*
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index d8686ce89160..08532d4ffe0a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include "intel_drv.h"
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index d1542b7d4bc3..4d2815079fc2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -36,6 +36,7 @@
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/delay.h>
+#include <linux/util_macros.h>
#include <linux/platform_data/ina2xx.h>
@@ -141,19 +142,6 @@ static const struct ina2xx_config ina2xx_config[] = {
*/
static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
-static int ina226_avg_bits(int avg)
-{
- int i;
-
- /* Get the closest average from the tab. */
- for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
- if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
- break;
- }
-
- return i; /* Return 0b0111 for values greater than 1024. */
-}
-
static int ina226_reg_to_interval(u16 config)
{
int avg = ina226_avg_tab[INA226_READ_AVG(config)];
@@ -171,7 +159,8 @@ static u16 ina226_interval_to_reg(int interval, u16 config)
avg = DIV_ROUND_CLOSEST(interval * 1000,
INA226_TOTAL_CONV_TIME_DEFAULT);
- avg_bits = ina226_avg_bits(avg);
+ avg_bits = find_closest(avg, ina226_avg_tab,
+ ARRAY_SIZE(ina226_avg_tab));
return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
}
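
The removed ina226_avg_bits() loop, like the equivalent loops dropped from lm85 and w83795 below, is replaced by find_closest() and find_closest_descending() from the new <linux/util_macros.h> include. A minimal sketch of the lookup these callers rely on, mirroring the removed loop and assuming an ascending-sorted table (the _descending variant walks a descending table the same way):

/* Illustration of the "closest table entry" lookup; mirrors the removed
 * ina226_avg_bits() loop rather than the in-tree macro. */
static int closest_index(int x, const int *tbl, int len)
{
        int i;

        for (i = 0; i < len - 1; i++)
                if (x <= (tbl[i] + tbl[i + 1]) / 2)
                        break;

        return i;       /* index of the nearest entry */
}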
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 2b4b419273fe..6ff773fcaefb 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -34,6 +34,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -190,15 +191,7 @@ static const int lm85_range_map[] = {
static int RANGE_TO_REG(long range)
{
- int i;
-
- /* Find the closest match */
- for (i = 0; i < 15; ++i) {
- if (range <= (lm85_range_map[i] + lm85_range_map[i + 1]) / 2)
- break;
- }
-
- return i;
+ return find_closest(range, lm85_range_map, ARRAY_SIZE(lm85_range_map));
}
#define RANGE_FROM_REG(val) lm85_range_map[(val) & 0x0f]
@@ -209,16 +202,12 @@ static const int lm85_freq_map[8] = { /* 1 Hz */
static const int adm1027_freq_map[8] = { /* 1 Hz */
11, 15, 22, 29, 35, 44, 59, 88
};
+#define FREQ_MAP_LEN 8
-static int FREQ_TO_REG(const int *map, unsigned long freq)
+static int FREQ_TO_REG(const int *map,
+ unsigned int map_size, unsigned long freq)
{
- int i;
-
- /* Find the closest match */
- for (i = 0; i < 7; ++i)
- if (freq <= (map[i] + map[i + 1]) / 2)
- break;
- return i;
+ return find_closest(freq, map, map_size);
}
static int FREQ_FROM_REG(const int *map, u8 reg)
@@ -828,7 +817,8 @@ static ssize_t set_pwm_freq(struct device *dev,
data->cfg5 &= ~ADT7468_HFPWM;
lm85_write_value(client, ADT7468_REG_CFG5, data->cfg5);
} else { /* Low freq. mode */
- data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map, val);
+ data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map,
+ FREQ_MAP_LEN, val);
lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
(data->zone[nr].range << 4)
| data->pwm_freq[nr]);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 21894131190f..49276bbdac3d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -35,6 +35,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
@@ -308,11 +309,8 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
unsigned long best0, best1;
/* Best fit for cksel = 0 */
- for (reg0 = 0; reg0 < ARRAY_SIZE(pwm_freq_cksel0) - 1; reg0++) {
- if (val > (pwm_freq_cksel0[reg0] +
- pwm_freq_cksel0[reg0 + 1]) / 2)
- break;
- }
+ reg0 = find_closest_descending(val, pwm_freq_cksel0,
+ ARRAY_SIZE(pwm_freq_cksel0));
if (val < 375) /* cksel = 1 can't beat this */
return reg0;
best0 = pwm_freq_cksel0[reg0];
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c8d260e33a90..6de62a96e79c 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -60,6 +60,11 @@ config ATMEL_AIC5_IRQ
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
+config BCM7038_L1_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config BCM7120_L2_IRQ
bool
select GENERIC_IRQ_CHIP
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 552a74027601..dda4927e47a6 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
new file mode 100644
index 000000000000..d3b8c8be15f6
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -0,0 +1,335 @@
+/*
+ * Broadcom BCM7038 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include "irqchip.h"
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
+#define MAX_WORDS 8
+
+struct bcm7038_l1_cpu;
+
+struct bcm7038_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct bcm7038_l1_cpu *cpus[NR_CPUS];
+ u8 affinity[MAX_WORDS * IRQS_PER_WORD];
+};
+
+struct bcm7038_l1_cpu {
+ void __iomem *map_base;
+ u32 mask_cache[0];
+};
+
+/*
+ * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
+ *
+ * 7038:
+ * 0x1000_1400: W0_STATUS
+ * 0x1000_1404: W1_STATUS
+ * 0x1000_1408: W0_MASK_STATUS
+ * 0x1000_140c: W1_MASK_STATUS
+ * 0x1000_1410: W0_MASK_SET
+ * 0x1000_1414: W1_MASK_SET
+ * 0x1000_1418: W0_MASK_CLEAR
+ * 0x1000_141c: W1_MASK_CLEAR
+ *
+ * 7445:
+ * 0xf03e_1500: W0_STATUS
+ * 0xf03e_1504: W1_STATUS
+ * 0xf03e_1508: W2_STATUS
+ * 0xf03e_150c: W3_STATUS
+ * 0xf03e_1510: W4_STATUS
+ * 0xf03e_1514: W0_MASK_STATUS
+ * 0xf03e_1518: W1_MASK_STATUS
+ * [...]
+ */
+
+static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (0 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (1 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (2 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (3 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline u32 l1_readl(void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return ioread32be(reg);
+ else
+ return readl(reg);
+}
+
+static inline void l1_writel(u32 val, void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ iowrite32be(val, reg);
+ else
+ writel(val, reg);
+}
+
+static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc)
+{
+ struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
+ struct bcm7038_l1_cpu *cpu;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+#ifdef CONFIG_SMP
+ cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+ cpu = intc->cpus[0];
+#endif
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending, flags;
+ int hwirq;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
+ ~cpu->mask_cache[idx];
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ generic_handle_irq(irq_find_mapping(intc->domain,
+ base + hwirq));
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_clr(intc, word));
+}
+
+static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] |= mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_set(intc, word));
+}
+
+static void bcm7038_l1_unmask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm7038_l1_mask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm7038_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ irq_hw_number_t hw = d->hwirq;
+ u32 word = hw / IRQS_PER_WORD;
+ u32 mask = BIT(hw % IRQS_PER_WORD);
+ unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
+ bool was_disabled;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+
+ was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
+ mask);
+ __bcm7038_l1_mask(d, intc->affinity[hw]);
+ intc->affinity[hw] = first_cpu;
+ if (!was_disabled)
+ __bcm7038_l1_unmask(d, first_cpu);
+
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+ return 0;
+}
+
+static int __init bcm7038_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm7038_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm7038_l1_cpu *cpu;
+ unsigned int i, n_words, parent_irq;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (n_words > MAX_WORDS)
+ return -EINVAL;
+ else if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
+ cpu->mask_cache[i] = 0xffffffff;
+ }
+
+ parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!parent_irq) {
+ pr_err("failed to map parent interrupt %d\n", parent_irq);
+ return -EINVAL;
+ }
+ irq_set_handler_data(parent_irq, intc);
+ irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle);
+
+ return 0;
+}
+
+static struct irq_chip bcm7038_l1_irq_chip = {
+ .name = "bcm7038-l1",
+ .irq_mask = bcm7038_l1_mask,
+ .irq_unmask = bcm7038_l1_unmask,
+ .irq_set_affinity = bcm7038_l1_set_affinity,
+};
+
+static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops bcm7038_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm7038_l1_map,
+};
+
+int __init bcm7038_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm7038_l1_chip *intc;
+ int idx, ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&intc->lock);
+ for_each_possible_cpu(idx) {
+ ret = bcm7038_l1_init_one(dn, idx, intc);
+ if (ret < 0) {
+ if (idx)
+ break;
+ pr_err("failed to remap intc L1 registers\n");
+ goto out_free;
+ }
+ }
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm7038_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
+ intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
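
The reg_*() helpers above encode the packed layout documented in the block comment: all STATUS words first, then MASK_STATUS, MASK_SET and MASK_CLEAR, each group n_words long. A worked example for the two-word 7038 layout shows how the computed offsets line up with the documented registers:

/* With n_words = 2 (the 7038 case, base 0x1000_1400):
 *   reg_status(intc, 1)   = (0 * 2 + 1) * 4 = 0x04 -> 0x1000_1404 W1_STATUS
 *   reg_mask_set(intc, 0) = (2 * 2 + 0) * 4 = 0x10 -> 0x1000_1410 W0_MASK_SET
 *   reg_mask_clr(intc, 1) = (3 * 2 + 1) * 4 = 0x1c -> 0x1000_141c W1_MASK_CLEAR
 */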
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 8eec8e1201d9..3ba5cc780fcb 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kconfig.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -34,15 +35,21 @@
#define IRQSTAT 0x04
#define MAX_WORDS 4
+#define MAX_MAPPINGS (MAX_WORDS * 2)
#define IRQS_PER_WORD 32
struct bcm7120_l2_intc_data {
unsigned int n_words;
- void __iomem *base[MAX_WORDS];
+ void __iomem *map_base[MAX_MAPPINGS];
+ void __iomem *pair_base[MAX_WORDS];
+ int en_offset[MAX_WORDS];
+ int stat_offset[MAX_WORDS];
struct irq_domain *domain;
bool can_wake;
u32 irq_fwd_mask[MAX_WORDS];
u32 irq_map_mask[MAX_WORDS];
+ int num_parent_irqs;
+ const __be32 *map_mask_prop;
};
static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
@@ -61,7 +68,8 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
int hwirq;
irq_gc_lock(gc);
- pending = irq_reg_readl(gc, IRQSTAT) & gc->mask_cache;
+ pending = irq_reg_readl(gc, b->stat_offset[idx]) &
+ gc->mask_cache;
irq_gc_unlock(gc);
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
@@ -76,27 +84,30 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
static void bcm7120_l2_intc_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct bcm7120_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
if (b->can_wake)
- irq_reg_writel(gc, gc->mask_cache | gc->wake_active, IRQEN);
+ irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
+ ct->regs.mask);
irq_gc_unlock(gc);
}
static void bcm7120_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
/* Restore the saved mask */
irq_gc_lock(gc);
- irq_reg_writel(gc, gc->mask_cache, IRQEN);
+ irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
static int bcm7120_l2_intc_init_one(struct device_node *dn,
struct bcm7120_l2_intc_data *data,
- int irq, const __be32 *map_mask)
+ int irq)
{
int parent_irq;
unsigned int idx;
@@ -110,9 +121,15 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
/* For multiple parent IRQs with multiple words, this looks like:
* <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
*/
- for (idx = 0; idx < data->n_words; idx++)
- data->irq_map_mask[idx] |=
- be32_to_cpup(map_mask + irq * data->n_words + idx);
+ for (idx = 0; idx < data->n_words; idx++) {
+ if (data->map_mask_prop) {
+ data->irq_map_mask[idx] |=
+ be32_to_cpup(data->map_mask_prop +
+ irq * data->n_words + idx);
+ } else {
+ data->irq_map_mask[idx] = 0xffffffff;
+ }
+ }
irq_set_handler_data(parent_irq, data);
irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
@@ -120,68 +137,107 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
return 0;
}
-int __init bcm7120_l2_intc_of_init(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ int ret;
+
+ data->map_base[0] = of_iomap(dn, 0);
+ if (!data->map_base[0]) {
+ pr_err("unable to map registers\n");
+ return -ENOMEM;
+ }
+
+ data->pair_base[0] = data->map_base[0];
+ data->en_offset[0] = IRQEN;
+ data->stat_offset[0] = IRQSTAT;
+ data->n_words = 1;
+
+ ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ data->irq_fwd_mask, data->n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
+ data->map_mask_prop = of_get_property(dn, "brcm,int-map-mask", &ret);
+ if (!data->map_mask_prop ||
+ (ret != (sizeof(__be32) * data->num_parent_irqs * data->n_words))) {
+ pr_err("invalid brcm,int-map-mask property\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ unsigned int gc_idx;
+
+ for (gc_idx = 0; gc_idx < MAX_WORDS; gc_idx++) {
+ unsigned int map_idx = gc_idx * 2;
+ void __iomem *en = of_iomap(dn, map_idx + 0);
+ void __iomem *stat = of_iomap(dn, map_idx + 1);
+ void __iomem *base = min(en, stat);
+
+ data->map_base[map_idx + 0] = en;
+ data->map_base[map_idx + 1] = stat;
+
+ if (!base)
+ break;
+
+ data->pair_base[gc_idx] = base;
+ data->en_offset[gc_idx] = en - base;
+ data->stat_offset[gc_idx] = stat - base;
+ }
+
+ if (!gc_idx) {
+ pr_err("unable to map registers\n");
+ return -EINVAL;
+ }
+
+ data->n_words = gc_idx;
+ return 0;
+}
+
+int __init bcm7120_l2_intc_probe(struct device_node *dn,
+ struct device_node *parent,
+ int (*iomap_regs_fn)(struct device_node *,
+ struct bcm7120_l2_intc_data *),
+ const char *intc_name)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct bcm7120_l2_intc_data *data;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- const __be32 *map_mask;
- int num_parent_irqs;
- int ret = 0, len;
+ int ret = 0;
unsigned int idx, irq, flags;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- for (idx = 0; idx < MAX_WORDS; idx++) {
- data->base[idx] = of_iomap(dn, idx);
- if (!data->base[idx])
- break;
- data->n_words = idx + 1;
- }
- if (!data->n_words) {
- pr_err("failed to remap intc L2 registers\n");
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- /* Enable all interrupts specified in the interrupt forward mask;
- * disable all others. If the property doesn't exist (-EINVAL),
- * assume all zeroes.
- */
- ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
- data->irq_fwd_mask, data->n_words);
- if (ret == 0 || ret == -EINVAL) {
- for (idx = 0; idx < data->n_words; idx++)
- __raw_writel(data->irq_fwd_mask[idx],
- data->base[idx] + IRQEN);
- } else {
- /* property exists but has the wrong number of words */
- pr_err("invalid int-fwd-mask property\n");
- ret = -EINVAL;
- goto out_unmap;
- }
-
- num_parent_irqs = of_irq_count(dn);
- if (num_parent_irqs <= 0) {
+ data->num_parent_irqs = of_irq_count(dn);
+ if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
goto out_unmap;
}
- map_mask = of_get_property(dn, "brcm,int-map-mask", &len);
- if (!map_mask ||
- (len != (sizeof(*map_mask) * num_parent_irqs * data->n_words))) {
- pr_err("invalid brcm,int-map-mask property\n");
- ret = -EINVAL;
+ ret = iomap_regs_fn(dn, data);
+ if (ret < 0)
goto out_unmap;
+
+ for (idx = 0; idx < data->n_words; idx++) {
+ __raw_writel(data->irq_fwd_mask[idx],
+ data->pair_base[idx] +
+ data->en_offset[idx]);
}
- for (irq = 0; irq < num_parent_irqs; irq++) {
- ret = bcm7120_l2_intc_init_one(dn, data, irq, map_mask);
+ for (irq = 0; irq < data->num_parent_irqs; irq++) {
+ ret = bcm7120_l2_intc_init_one(dn, data, irq);
if (ret)
goto out_unmap;
}
@@ -215,11 +271,12 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
gc = irq_get_domain_generic_chip(data->domain, irq);
gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
- gc->reg_base = data->base[idx];
gc->private = data;
ct = gc->chip_types;
- ct->regs.mask = IRQEN;
+ gc->reg_base = data->pair_base[idx];
+ ct->regs.mask = data->en_offset[idx];
+
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_ack = irq_gc_noop;
@@ -236,20 +293,38 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
}
}
- pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n",
- data->base[0], num_parent_irqs);
+ pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
+ intc_name, data->map_base[0], data->num_parent_irqs);
return 0;
out_free_domain:
irq_domain_remove(data->domain);
out_unmap:
- for (idx = 0; idx < MAX_WORDS; idx++) {
- if (data->base[idx])
- iounmap(data->base[idx]);
+ for (idx = 0; idx < MAX_MAPPINGS; idx++) {
+ if (data->map_base[idx])
+ iounmap(data->map_base[idx]);
}
kfree(data);
return ret;
}
+
+int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+ "BCM7120 L2");
+}
+
+int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+ "BCM3380 L2");
+}
+
IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
- bcm7120_l2_intc_of_init);
+ bcm7120_l2_intc_probe_7120);
+
+IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
+ bcm7120_l2_intc_probe_3380);
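
With this refactor the register mapping is delegated to a per-variant callback, so the original BCM7120 layout (one enable/status pair at the fixed IRQEN/IRQSTAT offsets) and the BCM3380 layout (separate enable and status mappings per word) both feed the same generic-chip setup through pair_base/en_offset/stat_offset. A hypothetical further variant would hook in the same way as the BCM3380 wrapper above; "bcm9999" below is a made-up compatible used purely for illustration:

static int __init bcm9999_l2_iomap(struct device_node *dn,
                                   struct bcm7120_l2_intc_data *data)
{
        /* Map the registers for this layout and fill in map_base[],
         * pair_base[], en_offset[], stat_offset[] and n_words. */
        return -EINVAL;         /* placeholder for the sketch */
}

static int __init bcm9999_l2_probe(struct device_node *dn,
                                   struct device_node *parent)
{
        return bcm7120_l2_intc_probe(dn, parent, bcm9999_l2_iomap,
                                     "BCM9999 L2");
}

IRQCHIP_DECLARE(bcm9999_l2_intc, "brcm,bcm9999-l2-intc", bcm9999_l2_probe);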
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 313c2c64498a..d6bcc6be0777 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -136,7 +136,11 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Disable all interrupts by default */
writel(0xffffffff, data->base + CPU_MASK_SET);
- writel(0xffffffff, data->base + CPU_CLEAR);
+
+ /* Wakeup interrupts may be retained from S5 (cold boot) */
+ data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
+ if (!data->can_wake)
+ writel(0xffffffff, data->base + CPU_CLEAR);
data->parent_irq = irq_of_parse_and_map(np, 0);
if (!data->parent_irq) {
@@ -188,8 +192,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
- if (of_property_read_bool(np, "brcm,irq-can-wake")) {
- data->can_wake = true;
+ if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
* in wake_enabled mask
*/
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f2d269bca789..bc48b7dc89ec 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -239,7 +239,7 @@ int gic_get_c0_compare_int(void)
int gic_get_c0_perfcount_int(void)
{
if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
- /* Is the erformance counter shared with the timer? */
+ /* Is the performance counter shared with the timer? */
if (cp0_perfcount_irq < 0)
return -1;
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
@@ -248,6 +248,29 @@ int gic_get_c0_perfcount_int(void)
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
+int gic_get_c0_fdc_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
+ /* Is the FDC IRQ even present? */
+ if (cp0_fdc_irq < 0)
+ return -1;
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ }
+
+ /*
+ * Some cores claim the FDC is routable but it doesn't actually seem to
+ * be connected.
+ */
+ switch (current_cpu_type()) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ return -1;
+ }
+
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
+}
+
static void gic_handle_shared_int(void)
{
unsigned int i, intr, virq;
@@ -613,15 +636,20 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
* of the MIPS kernel code does not use the percpu IRQ API for
* the CP0 timer and performance counter interrupts.
*/
- if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ case GIC_LOCAL_INT_PERFCTR:
+ case GIC_LOCAL_INT_FDC:
+ irq_set_chip_and_handler(virq,
+ &gic_all_vpes_local_irq_controller,
+ handle_percpu_irq);
+ break;
+ default:
irq_set_chip_and_handler(virq,
&gic_local_irq_controller,
handle_percpu_devid_irq);
irq_set_percpu_devid(virq);
- } else {
- irq_set_chip_and_handler(virq,
- &gic_all_vpes_local_irq_controller,
- handle_percpu_irq);
+ break;
}
spin_lock_irqsave(&gic_lock, flags);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 84325f267acf..84b0a2d74d60 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -6,6 +6,15 @@ menuconfig MAILBOX
signals. Say Y if your platform supports hardware mailboxes.
if MAILBOX
+
+config ARM_MHU
+ tristate "ARM MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHU controller driver.
+ The controller has 3 mailbox channels, the last of which can be
+ used in Secure mode only.
+
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e79231154cf..b18201e97e29 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -2,6 +2,8 @@
obj-$(CONFIG_MAILBOX) += mailbox.o
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000000..ac693c635357
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/amba/bus.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3
+
+struct mhu_link {
+ unsigned irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, mhu_rx_interrupt,
+ IRQF_SHARED, "mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to aquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int i, err;
+ struct arm_mhu *mhu;
+ struct device *dev = &adev->dev;
+ int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = adev->irq[i];
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 10;
+
+ amba_set_drvdata(adev, mhu);
+
+ err = mbox_controller_register(&mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "ARM MHU Mailbox registered\n");
+ return 0;
+}
+
+static int mhu_remove(struct amba_device *adev)
+{
+ struct arm_mhu *mhu = amba_get_drvdata(adev);
+
+ mbox_controller_unregister(&mhu->mbox);
+
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_driver = {
+ .drv = {
+ .name = "mhu",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_probe,
+ .remove = mhu_remove,
+};
+module_amba_driver(arm_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Driver");
+MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 977c814cdf6f..7e91d68a3ac3 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -20,10 +20,35 @@
* shared memory regions as defined in the PCC table entries. The PCC
* specification supports a Doorbell mechanism for the PCC clients
* to notify the platform about new data. This Doorbell information
- * is also specified in each PCC table entry. See pcc_send_data()
- * and pcc_tx_done() for basic mode of operation.
+ * is also specified in each PCC table entry.
*
- * For more details about PCC, please see the ACPI specification from
+ * The typical high-level flow of operation is:
+ *
+ * PCC Reads:
+ * * Client tries to acquire a channel lock.
+ * * After it is acquired, it writes the READ cmd to the communication
+ * region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, the client has control over the channel
+ * and can proceed with its reads.
+ * * Client releases the lock.
+ *
+ * PCC Writes:
+ * * Client tries to acquire the channel lock.
+ * * Client writes to its communication region after it acquires the
+ * channel lock.
+ * * Client writes the WRITE cmd to the communication region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, the writes have succeeded and it can
+ * release the channel lock.
+ *
+ * There is a nominal latency defined for each channel, which indicates
+ * how long to wait for a command to complete. If the command does not
+ * complete in that time, the client needs to retry or assume failure.
+ *
+ * For more details about PCC, please see the ACPI specification from
* http://www.uefi.org/ACPIv5.1 Section 14.
*
* This file implements PCC as a Mailbox controller and allows for PCC
@@ -42,8 +67,6 @@
#include "mailbox.h"
#define MAX_PCC_SUBSPACES 256
-#define PCCS_SS_SIG_MAGIC 0x50434300
-#define PCC_CMD_COMPLETE 0x1
static struct mbox_chan *pcc_mbox_channels;
@@ -71,23 +94,6 @@ static struct mbox_chan *get_pcc_channel(int id)
}
/**
- * get_subspace_id - Given a Mailbox channel, find out the
- * PCC subspace id.
- * @chan: Pointer to Mailbox Channel from which we want
- * the index.
- * Return: Errno if not found, else positive index number.
- */
-static int get_subspace_id(struct mbox_chan *chan)
-{
- unsigned int id = chan - pcc_mbox_channels;
-
- if (id < 0 || id > pcc_mbox_ctrl.num_chans)
- return -ENOENT;
-
- return id;
-}
-
-/**
* pcc_mbox_request_channel - PCC clients call this function to
* request a pointer to their PCC subspace, from which they
* can get the details of communicating with the remote.
@@ -117,7 +123,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
chan = get_pcc_channel(subspace_id);
if (!chan || chan->cl) {
- dev_err(dev, "%s: PCC mailbox not free\n", __func__);
+ dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
@@ -161,81 +167,30 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
/**
- * pcc_tx_done - Callback from Mailbox controller code to
- * check if PCC message transmission completed.
- * @chan: Pointer to Mailbox channel on which previous
- * transmission occurred.
- *
- * Return: TRUE if succeeded.
- */
-static bool pcc_tx_done(struct mbox_chan *chan)
-{
- struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
- u16 cmd_delay = pcct_ss->latency;
- unsigned int retries = 0;
-
- /* Try a few times while waiting for platform to consume */
- while (!(readw_relaxed(&generic_comm_base->status)
- & PCC_CMD_COMPLETE)) {
-
- if (retries++ < 5)
- udelay(cmd_delay);
- else {
- /*
- * If the remote is dead, this will cause the Mbox
- * controller to timeout after mbox client.tx_tout
- * msecs.
- */
- pr_err("PCC platform did not respond.\n");
- return false;
- }
- }
- return true;
-}
-
-/**
- * pcc_send_data - Called from Mailbox Controller code to finally
- * transmit data over channel.
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over PCC channel once
+ * OS has control over it. See above for flow of operations.
* @chan: Pointer to Mailbox channel over which to send data.
- * @data: Actual data to be written over channel.
+ * @data: Client specific data written over channel. Used here
+ * only for debug after PCC transaction completes.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
struct acpi_generic_address doorbell;
u64 doorbell_preserve;
u64 doorbell_val;
u64 doorbell_write;
- u16 cmd = *(u16 *) data;
- u16 ss_idx = -1;
-
- ss_idx = get_subspace_id(chan);
-
- if (ss_idx < 0) {
- pr_err("Invalid Subspace ID from PCC client\n");
- return -EINVAL;
- }
doorbell = pcct_ss->doorbell_register;
doorbell_preserve = pcct_ss->preserve_mask;
doorbell_write = pcct_ss->write_mask;
- /* Write to the shared comm region. */
- writew(cmd, &generic_comm_base->command);
-
- /* Write Subspace MAGIC value so platform can identify destination. */
- writel((PCCS_SS_SIG_MAGIC | ss_idx), &generic_comm_base->signature);
-
- /* Flip CMD COMPLETE bit */
- writew(0, &generic_comm_base->status);
-
- /* Sync notification from OSPM to Platform. */
+ /* Sync notification from OS to Platform. */
acpi_read(&doorbell_val, &doorbell);
acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
&doorbell);
@@ -245,7 +200,6 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
static struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
- .last_tx_done = pcc_tx_done,
};
/**
@@ -351,8 +305,6 @@ static int pcc_mbox_probe(struct platform_device *pdev)
pcc_mbox_ctrl.chans = pcc_mbox_channels;
pcc_mbox_ctrl.ops = &pcc_chan_ops;
- pcc_mbox_ctrl.txdone_poll = true;
- pcc_mbox_ctrl.txpoll_period = 10;
pcc_mbox_ctrl.dev = &pdev->dev;
pr_info("Registering PCC driver as Mailbox controller\n");
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 149fdca3fb44..72b0e2db3aab 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -79,7 +79,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x4000;
- if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = 0x4000 - DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = (if_ctl >> 8) & 0x3f;
buf[1] = (if_ctl >> 0) & 0xff;
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 422e84bbb008..490e090048ef 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -244,12 +244,6 @@ error:
return ret;
}
-/* 64 bit div with round closest, like DIV_ROUND_CLOSEST but 64 bit */
-u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor)
-{
- return div_u64(dividend + (divisor / 2), divisor);
-}
-
static int cxd2820r_set_frontend(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index 7ff5f60c83e1..4b428959b16e 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -64,8 +64,6 @@ int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
int len);
-u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor);
-
int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
int len);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 51401d036530..008cb2ac8480 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -103,7 +103,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
buf[1] = ((if_ctl >> 8) & 0xff);
buf[2] = ((if_ctl >> 0) & 0xff);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index 9c0c4f42175c..35fe364c7182 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -120,7 +120,7 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
buf[1] = ((if_ctl >> 8) & 0xff);
buf[2] = ((if_ctl >> 0) & 0xff);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fc145d202c46..922a750640e8 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
if (msb->data_dir == READ) {
- for (cnt = 0; cnt < msb->current_seg; cnt++)
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
t_len += msb->req_sg[cnt].length
/ msb->page_size;
@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len += msb->current_page - 1;
t_len *= msb->page_size;
+ }
}
} else
t_len = blk_rq_bytes(msb->block_req);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c69afb5e264e..2fc426926574 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2230,7 +2230,7 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
part_md->part_type = part_type;
list_add(&part_md->part, &md->part);
- string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+ string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s partition %u %s\n",
part_md->disk->disk_name, mmc_card_id(card),
@@ -2436,7 +2436,7 @@ static int mmc_blk_probe(struct device *dev)
if (IS_ERR(md))
return PTR_ERR(md);
- string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
+ string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
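
The mmc calls stop pre-shifting the capacity into bytes and instead pass the sector count with an explicit 512-byte block size, which assumes a string_get_size() variant that takes a blk_size argument and multiplies it in internally. A worked example of the equivalence (the sector count is a made-up figure):

/* A card reporting 30535680 sectors of 512 bytes (~14.6 GiB):
 *   before: string_get_size(30535680ULL << 9,  STRING_UNITS_2, buf, len)
 *   after:  string_get_size(30535680ULL, 512,  STRING_UNITS_2, buf, len)
 * both describe the same 30535680 * 512 = 15634268160-byte capacity.
 */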
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index bd5916a60cb5..77363d680532 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -400,7 +400,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
}
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 5cb93d1f50a4..de77d3a74abc 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -17,7 +17,7 @@
#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index d93b2b6b1f7a..82f7000a285d 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -21,6 +21,7 @@
* objects.
*/
+#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
@@ -224,10 +225,18 @@ static inline unsigned long fast_get_dcookie(struct path *path)
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
unsigned long cookie = NO_COOKIE;
+ struct file *exe_file;
- if (mm && mm->exe_file)
- cookie = fast_get_dcookie(&mm->exe_file->f_path);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ goto done;
+ cookie = fast_get_dcookie(&exe_file->f_path);
+ fput(exe_file);
+done:
return cookie;
}
@@ -236,6 +245,8 @@ static unsigned long get_exec_dcookie(struct mm_struct *mm)
* pair that can then be added to the global event buffer. We make
* sure to do this lookup before a mm->mmap modification happens so
* we don't lose track.
+ *
+ * The caller must ensure the mm is not NULL (i.e. not a kernel thread).
*/
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
@@ -243,6 +254,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
+ down_read(&mm->mmap_sem);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -262,6 +274,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
+ up_read(&mm->mmap_sem);
return cookie;
}
@@ -402,20 +415,9 @@ static void release_mm(struct mm_struct *mm)
{
if (!mm)
return;
- up_read(&mm->mmap_sem);
mmput(mm);
}
-
-static struct mm_struct *take_tasks_mm(struct task_struct *task)
-{
- struct mm_struct *mm = get_task_mm(task);
- if (mm)
- down_read(&mm->mmap_sem);
- return mm;
-}
-
-
static inline int is_code(unsigned long val)
{
return val == ESCAPE_CODE;
@@ -532,7 +534,7 @@ void sync_buffer(int cpu)
new = (struct task_struct *)val;
oldmm = mm;
release_mm(oldmm);
- mm = take_tasks_mm(new);
+ mm = get_task_mm(new);
if (mm != oldmm)
cookie = get_exec_dcookie(mm);
add_user_ctx_switch(new, cookie);
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 09fde58b12e0..0adccbf5c83f 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,6 +1,9 @@
if X86
source "drivers/platform/x86/Kconfig"
endif
+if MIPS
+source "drivers/platform/mips/Kconfig"
+endif
if GOLDFISH
source "drivers/platform/goldfish/Kconfig"
endif
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 3656b7b17b99..ca2692510733 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
new file mode 100644
index 000000000000..125e569017be
--- /dev/null
+++ b/drivers/platform/mips/Kconfig
@@ -0,0 +1,30 @@
+#
+# MIPS Platform Specific Drivers
+#
+
+menuconfig MIPS_PLATFORM_DEVICES
+ bool "MIPS Platform Specific Device Drivers"
+ default y
+ help
+ Say Y here to get to see options for device drivers of various
+ MIPS platforms, including vendor-specific netbook/laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if MIPS_PLATFORM_DEVICES
+
+config MIPS_ACPI
+ bool
+ default y if LOONGSON_MACH3X
+
+config CPU_HWMON
+ tristate "Loongson CPU HWMon Driver"
+ depends on LOONGSON_MACH3X
+ select HWMON
+ default y
+ help
+ Loongson-3A/3B CPU Hwmon (temperature sensor) driver.
+
+endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
new file mode 100644
index 000000000000..43412849b195
--- /dev/null
+++ b/drivers/platform/mips/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MIPS_ACPI) += acpi_init.o
+obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
diff --git a/drivers/platform/mips/acpi_init.c b/drivers/platform/mips/acpi_init.c
new file mode 100644
index 000000000000..dbdad79ead8f
--- /dev/null
+++ b/drivers/platform/mips/acpi_init.c
@@ -0,0 +1,150 @@
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+
+#define SBX00_ACPI_IO_BASE 0x800
+#define SBX00_ACPI_IO_SIZE 0x100
+
+#define ACPI_PM_EVT_BLK (SBX00_ACPI_IO_BASE + 0x00) /* 4 bytes */
+#define ACPI_PM_CNT_BLK (SBX00_ACPI_IO_BASE + 0x04) /* 2 bytes */
+#define ACPI_PMA_CNT_BLK (SBX00_ACPI_IO_BASE + 0x0F) /* 1 byte */
+#define ACPI_PM_TMR_BLK (SBX00_ACPI_IO_BASE + 0x18) /* 4 bytes */
+#define ACPI_GPE0_BLK (SBX00_ACPI_IO_BASE + 0x10) /* 8 bytes */
+#define ACPI_END (SBX00_ACPI_IO_BASE + 0x80)
+
+#define PM_INDEX 0xCD6
+#define PM_DATA 0xCD7
+#define PM2_INDEX 0xCD0
+#define PM2_DATA 0xCD1
+
+/*
+ * The SCI interrupt needs ACPI I/O space; allocate it here.
+ */
+
+static int __init register_acpi_resource(void)
+{
+ request_region(SBX00_ACPI_IO_BASE, SBX00_ACPI_IO_SIZE, "acpi");
+ return 0;
+}
+
+static void pmio_write_index(u16 index, u8 reg, u8 value)
+{
+ outb(reg, index);
+ outb(value, index + 1);
+}
+
+static u8 pmio_read_index(u16 index, u8 reg)
+{
+ outb(reg, index);
+ return inb(index + 1);
+}
+
+void pm_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm_iowrite);
+
+u8 pm_ioread(u8 reg)
+{
+ return pmio_read_index(PM_INDEX, reg);
+}
+EXPORT_SYMBOL(pm_ioread);
+
+void pm2_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM2_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm2_iowrite);
+
+u8 pm2_ioread(u8 reg)
+{
+ return pmio_read_index(PM2_INDEX, reg);
+}
+EXPORT_SYMBOL(pm2_ioread);
+
+static void acpi_hw_clear_status(void)
+{
+ u16 value;
+
+ /* PMStatus: Clear WakeStatus/PwrBtnStatus */
+ value = inw(ACPI_PM_EVT_BLK);
+ value |= (1 << 8 | 1 << 15);
+ outw(value, ACPI_PM_EVT_BLK);
+
+ /* GPEStatus: Clear all generated events */
+ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
+}
+
+void acpi_registers_setup(void)
+{
+ u32 value;
+
+ /* PM Status Base */
+ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
+ pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
+
+ /* PM Control Base */
+ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
+ pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
+
+ /* GPM Base */
+ pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
+ pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
+
+ /* ACPI End */
+ pm_iowrite(0x2e, ACPI_END & 0xff);
+ pm_iowrite(0x2f, ACPI_END >> 8);
+
+ /* IO Decode: When AcpiDecodeEnable is set, the south bridge uses the contents
+ * of the PM registers at index 0x20~0x2B to decode the ACPI I/O addresses. */
+ pm_iowrite(0x0e, 1 << 3);
+
+ /* SCI_EN set */
+ outw(1, ACPI_PM_CNT_BLK);
+
+ /* Enable to generate SCI */
+ pm_iowrite(0x10, pm_ioread(0x10) | 1);
+
+ /* GPM3/GPM9 enable */
+ value = inl(ACPI_GPE0_BLK + 4);
+ outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
+
+ /* Set GPM9 as input */
+ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
+
+ /* Set GPM9 as non-output */
+ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
+
+ /* GPM3 config ACPI trigger SCIOUT */
+ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
+
+ /* GPM9 config ACPI trigger SCIOUT */
+ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
+
+ /* GPM3 config falling edge trigger */
+ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
+
+ /* No wait for STPGNT# in ACPI Sx state */
+ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
+
+ /* Set GPM3 pull-down enable */
+ value = pm2_ioread(0xf6);
+ value |= ((1 << 7) | (1 << 3));
+ pm2_iowrite(0xf6, value);
+
+ /* Set GPM9 pull-down enable */
+ value = pm2_ioread(0xf8);
+ value |= ((1 << 5) | (1 << 1));
+ pm2_iowrite(0xf8, value);
+}
+
+int __init sbx00_acpi_init(void)
+{
+ register_acpi_resource();
+ acpi_registers_setup();
+ acpi_hw_clear_status();
+
+ return 0;
+}
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
new file mode 100644
index 000000000000..0f6c63e17049
--- /dev/null
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -0,0 +1,207 @@
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+#include <loongson_hwmon.h>
+
+/*
+ * Loongson-3 series CPUs have two temperature sensors inside,
+ * each reporting a value from 0 to 255; anything above 127 is
+ * dangerous. Only sensor1 data is provided here, because it is
+ * always hotter than sensor0.
+ */
+int loongson3_cpu_temp(int cpu)
+{
+ u32 reg;
+
+ reg = LOONGSON_CHIPTEMP(cpu);
+ if (loongson_sysconf.cputype == Loongson_3A)
+ reg = (reg >> 8) & 0xff;
+ else if (loongson_sysconf.cputype == Loongson_3B)
+ reg = ((reg >> 8) & 0xff) - 100;
+
+ return (int)reg * 1000;
+}
+
+static struct device *cpu_hwmon_dev;
+
+static ssize_t get_hwmon_name(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, get_hwmon_name, NULL, 0);
+
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ NULL
+};
+
+/* Hwmon device attribute group */
+static struct attribute_group cpu_hwmon_attribute_group = {
+ .attrs = cpu_hwmon_attributes,
+};
+
+/* Hwmon device get name */
+static ssize_t get_hwmon_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "cpu-hwmon\n");
+}
+
+static ssize_t get_cpu0_temp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t get_cpu1_temp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t cpu0_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t cpu1_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu0_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu0_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu1_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu1_temp_label, NULL, 2);
+
+static const struct attribute *hwmon_cputemp1[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute *hwmon_cputemp2[] = {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ NULL
+};
+
+static ssize_t cpu0_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "CPU 0 Temprature\n");
+}
+
+static ssize_t cpu1_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "CPU 1 Temprature\n");
+}
+
+static ssize_t get_cpu0_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int value = loongson3_cpu_temp(0);
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t get_cpu1_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int value = loongson3_cpu_temp(1);
+ return sprintf(buf, "%d\n", value);
+}
+
+static int create_sysfs_cputemp_files(struct kobject *kobj)
+{
+ int ret;
+
+ ret = sysfs_create_files(kobj, hwmon_cputemp1);
+ if (ret)
+ goto sysfs_create_temp1_fail;
+
+ if (loongson_sysconf.nr_cpus <= loongson_sysconf.cores_per_package)
+ return 0;
+
+ ret = sysfs_create_files(kobj, hwmon_cputemp2);
+ if (ret)
+ goto sysfs_create_temp2_fail;
+
+ return 0;
+
+sysfs_create_temp2_fail:
+ sysfs_remove_files(kobj, hwmon_cputemp1);
+
+sysfs_create_temp1_fail:
+ return -1;
+}
+
+static void remove_sysfs_cputemp_files(struct kobject *kobj)
+{
+ sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp1);
+
+ if (loongson_sysconf.nr_cpus > loongson_sysconf.cores_per_package)
+ sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp2);
+}
+
+#define CPU_THERMAL_THRESHOLD 90000
+static struct delayed_work thermal_work;
+
+static void do_thermal_timer(struct work_struct *work)
+{
+ int value = loongson3_cpu_temp(0);
+ if (value <= CPU_THERMAL_THRESHOLD)
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000));
+ else
+ orderly_poweroff(true);
+}
+
+static int __init loongson_hwmon_init(void)
+{
+ int ret;
+
+ pr_info("Loongson Hwmon Enter...\n");
+
+ cpu_hwmon_dev = hwmon_device_register(NULL);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ ret = -ENOMEM;
+ pr_err("hwmon_device_register fail!\n");
+ goto fail_hwmon_device_register;
+ }
+
+ ret = sysfs_create_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+ if (ret) {
+ pr_err("fail to create loongson hwmon!\n");
+ goto fail_sysfs_create_group_hwmon;
+ }
+
+ ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
+ if (ret) {
+ pr_err("fail to create cpu temprature interface!\n");
+ goto fail_create_sysfs_cputemp_files;
+ }
+
+ INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
+
+ return ret;
+
+fail_create_sysfs_cputemp_files:
+ sysfs_remove_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+
+fail_sysfs_create_group_hwmon:
+ hwmon_device_unregister(cpu_hwmon_dev);
+
+fail_hwmon_device_register:
+ return ret;
+}
+
+static void __exit loongson_hwmon_exit(void)
+{
+ cancel_delayed_work_sync(&thermal_work);
+ remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
+ sysfs_remove_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+ hwmon_device_unregister(cpu_hwmon_dev);
+}
+
+module_init(loongson_hwmon_init);
+module_exit(loongson_hwmon_exit);
+
+MODULE_AUTHOR("Yu Xiang <xiangy@lemote.com>");
+MODULE_AUTHOR("Huacai Chen <chenhc@lemote.com>");
+MODULE_DESCRIPTION("Loongson CPU Hwmon driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b5b5c3d485d6..6149ae01e11f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1111,6 +1111,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_DIGICOLOR
+ tristate "Conexant Digicolor RTC"
+ depends on ARCH_DIGICOLOR
+ help
+ If you say yes here you get support for the RTC on Conexant
+ Digicolor platforms. This currently includes the CX92755 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-digicolor.
+
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
depends on ARCH_MXC
@@ -1121,11 +1131,11 @@ config RTC_DRV_IMXDI
will be called "rtc-imxdi".
config RTC_DRV_OMAP
- tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX || SOC_AM33XX
+ tristate "TI OMAP Real Time Clock"
+ depends on ARCH_OMAP || ARCH_DAVINCI
help
Say "yes" here to support the on chip real time clock
- present on TI OMAP1, AM33xx and DA8xx/OMAP-L13x.
+ present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
This driver can also be built as a module, if so, module
will be called rtc-omap.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 69c87062b098..c31731c29762 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o
obj-$(CONFIG_RTC_DRV_DA9063) += rtc-da9063.o
obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
+obj-$(CONFIG_RTC_DRV_DIGICOLOR) += rtc-digicolor.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index c29ba7e14304..ea2a315df6b7 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -221,15 +221,15 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
+ strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
+ dev_set_name(&rtc->dev, "rtc%d", id);
+
/* Check to see if there is an ALARM already set in hw */
err = __rtc_read_alarm(rtc, &alrm);
if (!err && !rtc_valid_tm(&alrm.time))
rtc_initialize_alarm(rtc, &alrm);
- strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
- dev_set_name(&rtc->dev, "rtc%d", id);
-
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 6c719f23520a..e1cfa06810ef 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/rtc.h>
/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
@@ -32,8 +34,8 @@ static int __init rtc_hctosys(void)
struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (rtc == NULL) {
- pr_err("%s: unable to open rtc device (%s)\n",
- __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ pr_info("unable to open rtc device (%s)\n",
+ CONFIG_RTC_HCTOSYS_DEVICE);
goto err_open;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index d43ee409a5f2..166fc60d8b55 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -31,13 +31,14 @@ static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
memset(tm, 0, sizeof(struct rtc_time));
err = rtc->ops->read_time(rtc->dev.parent, tm);
if (err < 0) {
- dev_err(&rtc->dev, "read_time: fail to read\n");
+ dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
+ err);
return err;
}
err = rtc_valid_tm(tm);
if (err < 0)
- dev_err(&rtc->dev, "read_time: rtc_time isn't valid\n");
+ dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
}
return err;
}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index cfc2ef98d393..b5cbc1bf5a3e 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -881,7 +881,7 @@ static const struct rtc_class_ops rtc_ops = {
.alarm_irq_enable = abb5zes3_rtc_alarm_irq_enable,
};
-static struct regmap_config abb5zes3_rtc_regmap_config = {
+static const struct regmap_config abb5zes3_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b283a1a573b3..35efd3f75b18 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -37,9 +37,9 @@
#include "rtc-at91rm9200.h"
#define at91_rtc_read(field) \
- __raw_readl(at91_rtc_regs + field)
+ readl_relaxed(at91_rtc_regs + field)
#define at91_rtc_write(field, val) \
- __raw_writel((val), at91_rtc_regs + field)
+ writel_relaxed((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 87647f459198..a82556a0757a 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -28,6 +28,9 @@
* interrupts disabled, holding the global rtc_lock, to exclude those
* other drivers and utilities on correctly configured systems.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -385,8 +388,7 @@ static bool alarm_disable_quirk;
static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
{
alarm_disable_quirk = true;
- pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
- pr_info("RTC alarms disabled\n");
+ pr_info("BIOS has alarm-disable quirk - RTC alarms disabled\n");
return 0;
}
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 613c43b7e9ae..1ba4371cbc2d 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
@@ -23,6 +24,8 @@
#define rtc_err(rtc, fmt, ...) \
dev_err(rtc->da9052->dev, "%s: " fmt, __func__, ##__VA_ARGS__)
+#define DA9052_GET_TIME_RETRIES 5
+
struct da9052_rtc {
struct rtc_device *rtc;
struct da9052 *da9052;
@@ -58,22 +61,43 @@ static irqreturn_t da9052_rtc_irq(int irq, void *data)
static int da9052_read_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
{
int ret;
- uint8_t v[5];
+ uint8_t v[2][5];
+ int idx = 1;
+ int timeout = DA9052_GET_TIME_RETRIES;
- ret = da9052_group_read(rtc->da9052, DA9052_ALARM_MI_REG, 5, v);
- if (ret != 0) {
+ ret = da9052_group_read(rtc->da9052, DA9052_ALARM_MI_REG, 5, &v[0][0]);
+ if (ret) {
rtc_err(rtc, "Failed to group read ALM: %d\n", ret);
return ret;
}
- rtc_tm->tm_year = (v[4] & DA9052_RTC_YEAR) + 100;
- rtc_tm->tm_mon = (v[3] & DA9052_RTC_MONTH) - 1;
- rtc_tm->tm_mday = v[2] & DA9052_RTC_DAY;
- rtc_tm->tm_hour = v[1] & DA9052_RTC_HOUR;
- rtc_tm->tm_min = v[0] & DA9052_RTC_MIN;
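+ /*
+ * Keep re-reading until two consecutive 5-byte snapshots match, so a
+ * register update between reads cannot produce a torn alarm time.
+ */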
+ do {
+ ret = da9052_group_read(rtc->da9052,
+ DA9052_ALARM_MI_REG, 5, &v[idx][0]);
+ if (ret) {
+ rtc_err(rtc, "Failed to group read ALM: %d\n", ret);
+ return ret;
+ }
- ret = rtc_valid_tm(rtc_tm);
- return ret;
+ if (memcmp(&v[0][0], &v[1][0], 5) == 0) {
+ rtc_tm->tm_year = (v[0][4] & DA9052_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[0][3] & DA9052_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[0][2] & DA9052_RTC_DAY;
+ rtc_tm->tm_hour = v[0][1] & DA9052_RTC_HOUR;
+ rtc_tm->tm_min = v[0][0] & DA9052_RTC_MIN;
+
+ ret = rtc_valid_tm(rtc_tm);
+ return ret;
+ }
+
+ idx = (1-idx);
+ msleep(20);
+
+ } while (timeout--);
+
+ rtc_err(rtc, "Timed out reading alarm time\n");
+
+ return -EIO;
}
static int da9052_set_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
@@ -135,24 +159,45 @@ static int da9052_rtc_get_alarm_status(struct da9052_rtc *rtc)
static int da9052_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
{
struct da9052_rtc *rtc = dev_get_drvdata(dev);
- uint8_t v[6];
int ret;
+ uint8_t v[2][6];
+ int idx = 1;
+ int timeout = DA9052_GET_TIME_RETRIES;
- ret = da9052_group_read(rtc->da9052, DA9052_COUNT_S_REG, 6, v);
- if (ret < 0) {
+ ret = da9052_group_read(rtc->da9052, DA9052_COUNT_S_REG, 6, &v[0][0]);
+ if (ret) {
rtc_err(rtc, "Failed to read RTC time : %d\n", ret);
return ret;
}
- rtc_tm->tm_year = (v[5] & DA9052_RTC_YEAR) + 100;
- rtc_tm->tm_mon = (v[4] & DA9052_RTC_MONTH) - 1;
- rtc_tm->tm_mday = v[3] & DA9052_RTC_DAY;
- rtc_tm->tm_hour = v[2] & DA9052_RTC_HOUR;
- rtc_tm->tm_min = v[1] & DA9052_RTC_MIN;
- rtc_tm->tm_sec = v[0] & DA9052_RTC_SEC;
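+ /* Same double-read scheme as the alarm path: accept the time only when two consecutive snapshots agree. */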
+ do {
+ ret = da9052_group_read(rtc->da9052,
+ DA9052_COUNT_S_REG, 6, &v[idx][0]);
+ if (ret) {
+ rtc_err(rtc, "Failed to read RTC time : %d\n", ret);
+ return ret;
+ }
- ret = rtc_valid_tm(rtc_tm);
- return ret;
+ if (memcmp(&v[0][0], &v[1][0], 6) == 0) {
+ rtc_tm->tm_year = (v[0][5] & DA9052_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[0][4] & DA9052_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[0][3] & DA9052_RTC_DAY;
+ rtc_tm->tm_hour = v[0][2] & DA9052_RTC_HOUR;
+ rtc_tm->tm_min = v[0][1] & DA9052_RTC_MIN;
+ rtc_tm->tm_sec = v[0][0] & DA9052_RTC_SEC;
+
+ ret = rtc_valid_tm(rtc_tm);
+ return ret;
+ }
+
+ idx = (1-idx);
+ msleep(20);
+
+ } while (timeout--);
+
+ rtc_err(rtc, "Timed out reading time\n");
+
+ return -EIO;
}
static int da9052_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -161,6 +206,10 @@ static int da9052_rtc_set_time(struct device *dev, struct rtc_time *tm)
uint8_t v[6];
int ret;
+ /* DA9052 only has 6 bits for year - to represent 2000-2063 */
+ if ((tm->tm_year < 100) || (tm->tm_year > 163))
+ return -EINVAL;
+
rtc = dev_get_drvdata(dev);
v[0] = tm->tm_sec;
@@ -198,6 +247,10 @@ static int da9052_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
struct da9052_rtc *rtc = dev_get_drvdata(dev);
+ /* DA9052 only has 6 bits for year - to represent 2000-2063 */
+ if ((tm->tm_year < 100) || (tm->tm_year > 163))
+ return -EINVAL;
+
ret = da9052_rtc_enable_alarm(rtc, 0);
if (ret < 0)
return ret;
@@ -256,6 +309,8 @@ static int da9052_rtc_probe(struct platform_device *pdev)
return ret;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&da9052_rtc_ops, THIS_MODULE);
return PTR_ERR_OR_ZERO(rtc->rtc);
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
new file mode 100644
index 000000000000..8d05596a6765
--- /dev/null
+++ b/drivers/rtc/rtc-digicolor.c
@@ -0,0 +1,227 @@
+/*
+ * Real Time Clock driver for Conexant Digicolor
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/of.h>
+
+#define DC_RTC_CONTROL 0x0
+#define DC_RTC_TIME 0x8
+#define DC_RTC_REFERENCE 0xc
+#define DC_RTC_ALARM 0x10
+#define DC_RTC_INTFLAG_CLEAR 0x14
+#define DC_RTC_INTENABLE 0x16
+
+#define DC_RTC_CMD_MASK 0xf
+#define DC_RTC_GO_BUSY BIT(7)
+
+#define CMD_NOP 0
+#define CMD_RESET 1
+#define CMD_WRITE 3
+#define CMD_READ 4
+
+#define CMD_DELAY_US (10*1000)
+#define CMD_TIMEOUT_US (500*CMD_DELAY_US)
+
+struct dc_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *regs;
+};
+
+static int dc_rtc_cmds(struct dc_rtc *rtc, const u8 *cmds, int len)
+{
+ u8 val;
+ int i, ret;
+
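+ /*
+ * Issue each command and wait for the controller to clear GO_BUSY
+ * before sending the next one.
+ */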
+ for (i = 0; i < len; i++) {
+ writeb_relaxed((cmds[i] & DC_RTC_CMD_MASK) | DC_RTC_GO_BUSY,
+ rtc->regs + DC_RTC_CONTROL);
+ ret = readb_relaxed_poll_timeout(
+ rtc->regs + DC_RTC_CONTROL, val,
+ !(val & DC_RTC_GO_BUSY), CMD_DELAY_US, CMD_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dc_rtc_read(struct dc_rtc *rtc, unsigned long *val)
+{
+ static const u8 read_cmds[] = {CMD_READ, CMD_NOP};
+ u32 reference, time1, time2;
+ int ret;
+
+ ret = dc_rtc_cmds(rtc, read_cmds, ARRAY_SIZE(read_cmds));
+ if (ret < 0)
+ return ret;
+
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ time1 = readl_relaxed(rtc->regs + DC_RTC_TIME);
+ /* Read twice to ensure consistency */
+ while (1) {
+ time2 = readl_relaxed(rtc->regs + DC_RTC_TIME);
+ if (time1 == time2)
+ break;
+ time1 = time2;
+ }
+
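+ /* Current time is the programmed reference plus the running counter */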
+ *val = reference + time1;
+ return 0;
+}
+
+static int dc_rtc_write(struct dc_rtc *rtc, u32 val)
+{
+ static const u8 write_cmds[] = {CMD_WRITE, CMD_NOP, CMD_RESET, CMD_NOP};
+
+ writel_relaxed(val, rtc->regs + DC_RTC_REFERENCE);
+ return dc_rtc_cmds(rtc, write_cmds, ARRAY_SIZE(write_cmds));
+}
+
+static int dc_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+ int ret;
+
+ ret = dc_rtc_read(rtc, &now);
+ if (ret < 0)
+ return ret;
+ rtc_time64_to_tm(now, tm);
+
+ return 0;
+}
+
+static int dc_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+
+ return dc_rtc_write(rtc, secs);
+}
+
+static int dc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ u32 alarm_reg, reference;
+ unsigned long now;
+ int ret;
+
+ alarm_reg = readl_relaxed(rtc->regs + DC_RTC_ALARM);
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ rtc_time64_to_tm(reference + alarm_reg, &alarm->time);
+
+ ret = dc_rtc_read(rtc, &now);
+ if (ret < 0)
+ return ret;
+
+ alarm->pending = alarm_reg + reference > now;
+ alarm->enabled = readl_relaxed(rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static int dc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ time64_t alarm_time;
+ u32 reference;
+
+ alarm_time = rtc_tm_to_time64(&alarm->time);
+
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ writel_relaxed(alarm_time - reference, rtc->regs + DC_RTC_ALARM);
+
+ writeb_relaxed(!!alarm->enabled, rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+
+ writeb_relaxed(!!enabled, rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static struct rtc_class_ops dc_rtc_ops = {
+ .read_time = dc_rtc_read_time,
+ .set_mmss = dc_rtc_set_mmss,
+ .read_alarm = dc_rtc_read_alarm,
+ .set_alarm = dc_rtc_set_alarm,
+ .alarm_irq_enable = dc_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t dc_rtc_irq(int irq, void *dev_id)
+{
+ struct dc_rtc *rtc = dev_id;
+
+ writeb_relaxed(1, rtc->regs + DC_RTC_INTFLAG_CLEAR);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+}
+
+static int __init dc_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct dc_rtc *rtc;
+ int irq, ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(&pdev->dev, irq, dc_rtc_irq, 0, pdev->name, rtc);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rtc);
+ rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &dc_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "cnxt,cx92755-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_rtc_driver = {
+ .driver = {
+ .name = "digicolor_rtc",
+ .of_match_table = of_match_ptr(dc_dt_ids),
+ },
+};
+module_platform_driver_probe(dc_rtc_driver, dc_rtc_probe);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Conexant Digicolor Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8605fde394b2..167783fa7ac1 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -18,6 +18,8 @@
* "Sending and receiving", using SMBus level communication is preferred.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -406,7 +408,7 @@ static int ds1374_wdt_settimeout(unsigned int timeout)
/* Set new watchdog time */
ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3);
if (ret) {
- pr_info("rtc-ds1374 - couldn't set new watchdog time\n");
+ pr_info("couldn't set new watchdog time\n");
goto out;
}
@@ -539,12 +541,12 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- pr_info("rtc-ds1374: disable watchdog\n");
+ pr_info("disable watchdog\n");
ds1374_wdt_disable();
}
if (options & WDIOS_ENABLECARD) {
- pr_info("rtc-ds1374: enable watchdog\n");
+ pr_info("enable watchdog\n");
ds1374_wdt_settimeout(wdt_margin);
ds1374_wdt_ping();
}
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 803869c7d7c2..818a3635a8c8 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -16,6 +16,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -799,7 +801,7 @@ ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
- char *model = '\0';
+ char *model;
#ifdef CONFIG_RTC_DS1685_PROC_REGS
char bits[NUM_REGS][(NUM_BITS * NUM_SPACES) + NUM_BITS + 1];
#endif
@@ -2139,7 +2141,6 @@ ds1685_rtc_remove(struct platform_device *pdev)
static struct platform_driver ds1685_rtc_driver = {
.driver = {
.name = "rtc-ds1685",
- .owner = THIS_MODULE,
},
.probe = ds1685_rtc_probe,
.remove = ds1685_rtc_remove,
@@ -2175,7 +2176,7 @@ module_exit(ds1685_rtc_exit);
* ds1685_rtc_poweroff - uses the RTC chip to power the system off.
* @pdev: pointer to platform_device structure.
*/
-extern void __noreturn
+void __noreturn
ds1685_rtc_poweroff(struct platform_device *pdev)
{
u8 ctrla, ctrl4a, ctrl4b;
@@ -2183,7 +2184,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
/* Check for valid RTC data, else, spin forever. */
if (unlikely(!pdev)) {
- pr_emerg("rtc-ds1685: platform device data not available, spinning forever ...\n");
+ pr_emerg("platform device data not available, spinning forever ...\n");
unreachable();
} else {
/* Get the rtc data. */
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index adaf06c41479..7e48e532214f 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -15,6 +15,8 @@
* "Sending and receiving", using SMBus level communication is preferred.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -373,8 +375,8 @@ static void ds3232_work(struct work_struct *work)
if (stat & DS3232_REG_SR_A1F) {
control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
if (control < 0) {
- pr_warn("Read DS3232 Control Register error."
- "Disable IRQ%d.\n", client->irq);
+ pr_warn("Read Control Register error - Disable IRQ%d\n",
+ client->irq);
} else {
/* disable alarm1 interrupt */
control &= ~(DS3232_REG_CR_A1IE);
diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c
index b40fbe332af4..1a7f1d1bc174 100644
--- a/drivers/rtc/rtc-efi-platform.c
+++ b/drivers/rtc/rtc-efi-platform.c
@@ -8,6 +8,9 @@
* Copyright (C) 1999-2000 VA Linux Systems
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index fccf36699245..4f4930a2004c 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -15,6 +15,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/module.h>
+#include <linux/of.h>
/* Registers */
#define EM3027_REG_ON_OFF_CTRL 0x00
@@ -135,10 +136,20 @@ static struct i2c_device_id em3027_id[] = {
{ "em3027", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, em3027_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id em3027_of_match[] = {
+ { .compatible = "emmicro,em3027", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, em3027_of_match);
+#endif
static struct i2c_driver em3027_driver = {
.driver = {
.name = "rtc-em3027",
+ .of_match_table = of_match_ptr(em3027_of_match),
},
.probe = &em3027_probe,
.id_table = em3027_id,
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index b936bb4096b5..0f710e98538f 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -66,7 +66,7 @@
#define HYM8563_ALM_BIT_DISABLE BIT(7)
#define HYM8563_CLKOUT 0x0d
-#define HYM8563_CLKOUT_DISABLE BIT(7)
+#define HYM8563_CLKOUT_ENABLE BIT(7)
#define HYM8563_CLKOUT_32768 0
#define HYM8563_CLKOUT_1024 1
#define HYM8563_CLKOUT_32 2
@@ -309,7 +309,7 @@ static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
- if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE)
+ if (ret < 0)
return 0;
ret &= HYM8563_CLKOUT_MASK;
@@ -360,9 +360,9 @@ static int hym8563_clkout_control(struct clk_hw *hw, bool enable)
return ret;
if (enable)
- ret &= ~HYM8563_CLKOUT_DISABLE;
+ ret |= HYM8563_CLKOUT_ENABLE;
else
- ret |= HYM8563_CLKOUT_DISABLE;
+ ret &= ~HYM8563_CLKOUT_ENABLE;
return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret);
}
@@ -386,7 +386,7 @@ static int hym8563_clkout_is_prepared(struct clk_hw *hw)
if (ret < 0)
return ret;
- return !(ret & HYM8563_CLKOUT_DISABLE);
+ return !!(ret & HYM8563_CLKOUT_ENABLE);
}
static const struct clk_ops hym8563_clkout_ops = {
@@ -407,7 +407,7 @@ static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
int ret;
ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT,
- HYM8563_CLKOUT_DISABLE);
+ 0);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 7ff7427c2e6a..a82937e2f824 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -513,12 +515,12 @@ static int wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (rv & WDIOS_DISABLECARD) {
- pr_info("rtc-m41t80: disable watchdog\n");
+ pr_info("disable watchdog\n");
wdt_disable();
}
if (rv & WDIOS_ENABLECARD) {
- pr_info("rtc-m41t80: enable watchdog\n");
+ pr_info("enable watchdog\n");
wdt_ping();
}
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9d71328e59b9..7632a87784c3 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/delay.h>
@@ -103,8 +105,8 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_warn("%s: MAX77686 RTC cannot handle the year %d."
- "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ pr_warn("RTC cannot handle the year %d. Assume it's 2000.\n",
+ 1900 + tm->tm_year);
return -EINVAL;
}
return 0;
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 67fbe559d535..9e02bcda0c09 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/delay.h>
@@ -107,8 +109,8 @@ static int max8997_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_warn("%s: MAX8997 RTC cannot handle the year %d."
- "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ pr_warn("RTC cannot handle the year %d. Assume it's 2000.\n",
+ 1900 + tm->tm_year);
return -EINVAL;
}
return 0;
@@ -424,7 +426,7 @@ static void max8997_rtc_enable_smpl(struct max8997_rtc_info *info, bool enable)
val = 0;
max8997_read_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, &val);
- pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+ pr_info("WTSR_SMPL(0x%02x)\n", val);
}
static int max8997_rtc_init_reg(struct max8997_rtc_info *info)
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 9bf877bdf836..c1c5c4e3b3b4 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -7,6 +7,8 @@
* Copyright (C) 1993 Hamish Macdonald
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -111,7 +113,7 @@ static void msm6242_lock(struct msm6242_priv *priv)
}
if (!cnt)
- pr_warn("msm6242: timed out waiting for RTC (0x%x)\n",
+ pr_warn("timed out waiting for RTC (0x%x)\n",
msm6242_read(priv, MSM6242_CD));
}
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8e5851aa4369..8b6355ffaff9 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -118,12 +118,15 @@
#define KICK0_VALUE 0x83e70b13
#define KICK1_VALUE 0x95a4f1e0
+struct omap_rtc;
+
struct omap_rtc_device_type {
bool has_32kclk_en;
- bool has_kicker;
bool has_irqwakeen;
bool has_pmic_mode;
bool has_power_up_reset;
+ void (*lock)(struct omap_rtc *rtc);
+ void (*unlock)(struct omap_rtc *rtc);
};
struct omap_rtc {
@@ -156,6 +159,26 @@ static inline void rtc_writel(struct omap_rtc *rtc, unsigned int reg, u32 val)
writel(val, rtc->base + reg);
}
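+/*
+ * On AM3352-class RTCs register writes are gated by the kick registers:
+ * writing the KICK0/KICK1 magic values unlocks the block, writing zeroes
+ * locks it again. The default variants need no locking, hence the empty
+ * callbacks below.
+ */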
+static void am3352_rtc_unlock(struct omap_rtc *rtc)
+{
+ rtc_writel(rtc, OMAP_RTC_KICK0_REG, KICK0_VALUE);
+ rtc_writel(rtc, OMAP_RTC_KICK1_REG, KICK1_VALUE);
+}
+
+static void am3352_rtc_lock(struct omap_rtc *rtc)
+{
+ rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc_writel(rtc, OMAP_RTC_KICK1_REG, 0);
+}
+
+static void default_rtc_unlock(struct omap_rtc *rtc)
+{
+}
+
+static void default_rtc_lock(struct omap_rtc *rtc)
+{
+}
+
/*
* We rely on the rtc framework to handle locking (rtc->ops_lock),
* so the only other requirement is that register accesses which
@@ -186,7 +209,9 @@ static irqreturn_t rtc_irq(int irq, void *dev_id)
/* alarm irq? */
if (irq_data & OMAP_RTC_STATUS_ALARM) {
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM);
+ rtc->type->lock(rtc);
events |= RTC_IRQF | RTC_AF;
}
@@ -218,9 +243,11 @@ static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
}
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
if (rtc->type->has_irqwakeen)
rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
+ rtc->type->lock(rtc);
local_irq_enable();
return 0;
@@ -293,12 +320,14 @@ static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
local_irq_disable();
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_YEARS_REG, tm->tm_year);
rtc_write(rtc, OMAP_RTC_MONTHS_REG, tm->tm_mon);
rtc_write(rtc, OMAP_RTC_DAYS_REG, tm->tm_mday);
rtc_write(rtc, OMAP_RTC_HOURS_REG, tm->tm_hour);
rtc_write(rtc, OMAP_RTC_MINUTES_REG, tm->tm_min);
rtc_write(rtc, OMAP_RTC_SECONDS_REG, tm->tm_sec);
+ rtc->type->lock(rtc);
local_irq_enable();
@@ -341,6 +370,7 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
local_irq_disable();
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_ALARM_YEARS_REG, alm->time.tm_year);
rtc_write(rtc, OMAP_RTC_ALARM_MONTHS_REG, alm->time.tm_mon);
rtc_write(rtc, OMAP_RTC_ALARM_DAYS_REG, alm->time.tm_mday);
@@ -362,6 +392,7 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
if (rtc->type->has_irqwakeen)
rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
+ rtc->type->lock(rtc);
local_irq_enable();
@@ -391,6 +422,7 @@ static void omap_rtc_power_off(void)
unsigned long now;
u32 val;
+ rtc->type->unlock(rtc);
/* enable pmic_power_en control */
val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN);
@@ -423,6 +455,7 @@ static void omap_rtc_power_off(void)
val = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
rtc_writel(rtc, OMAP_RTC_INTERRUPTS_REG,
val | OMAP_RTC_INTERRUPTS_IT_ALARM2);
+ rtc->type->lock(rtc);
/*
* Wait for alarm to trigger (within two seconds) and external PMIC to
@@ -442,17 +475,21 @@ static struct rtc_class_ops omap_rtc_ops = {
static const struct omap_rtc_device_type omap_rtc_default_type = {
.has_power_up_reset = true,
+ .lock = default_rtc_lock,
+ .unlock = default_rtc_unlock,
};
static const struct omap_rtc_device_type omap_rtc_am3352_type = {
.has_32kclk_en = true,
- .has_kicker = true,
.has_irqwakeen = true,
.has_pmic_mode = true,
+ .lock = am3352_rtc_lock,
+ .unlock = am3352_rtc_unlock,
};
static const struct omap_rtc_device_type omap_rtc_da830_type = {
- .has_kicker = true,
+ .lock = am3352_rtc_lock,
+ .unlock = am3352_rtc_unlock,
};
static const struct platform_device_id omap_rtc_id_table[] = {
@@ -484,7 +521,7 @@ static const struct of_device_id omap_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
-static int __init omap_rtc_probe(struct platform_device *pdev)
+static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
struct resource *res;
@@ -527,10 +564,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- if (rtc->type->has_kicker) {
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, KICK0_VALUE);
- rtc_writel(rtc, OMAP_RTC_KICK1_REG, KICK1_VALUE);
- }
+ rtc->type->unlock(rtc);
/*
* disable interrupts
@@ -593,6 +627,8 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
if (reg != new_ctrl)
rtc_write(rtc, OMAP_RTC_CTRL_REG, new_ctrl);
+ rtc->type->lock(rtc);
+
device_init_wakeup(&pdev->dev, true);
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
@@ -626,8 +662,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
err:
device_init_wakeup(&pdev->dev, false);
- if (rtc->type->has_kicker)
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc->type->lock(rtc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -646,11 +681,11 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
+ rtc->type->unlock(rtc);
/* leave rtc running, but disable irqs */
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
- if (rtc->type->has_kicker)
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc->type->lock(rtc);
/* Disable the clock/module */
pm_runtime_put_sync(&pdev->dev);
@@ -666,6 +701,7 @@ static int omap_rtc_suspend(struct device *dev)
rtc->interrupts_reg = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
+ rtc->type->unlock(rtc);
/*
* FIXME: the RTC alarm is not currently acting as a wakeup event
* source on some platforms, and in fact this enable() call is just
@@ -675,6 +711,7 @@ static int omap_rtc_suspend(struct device *dev)
enable_irq_wake(rtc->irq_alarm);
else
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
+ rtc->type->lock(rtc);
/* Disable the clock/module */
pm_runtime_put_sync(dev);
@@ -689,10 +726,12 @@ static int omap_rtc_resume(struct device *dev)
/* Enable the clock/module so that we can access the registers */
pm_runtime_get_sync(dev);
+ rtc->type->unlock(rtc);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq_alarm);
else
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
+ rtc->type->lock(rtc);
return 0;
}
@@ -709,12 +748,15 @@ static void omap_rtc_shutdown(struct platform_device *pdev)
* Keep the ALARM interrupt enabled to allow the system to power up on
* alarm events.
*/
+ rtc->type->unlock(rtc);
mask = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
mask &= OMAP_RTC_INTERRUPTS_IT_ALARM;
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, mask);
+ rtc->type->lock(rtc);
}
static struct platform_driver omap_rtc_driver = {
+ .probe = omap_rtc_probe,
.remove = __exit_p(omap_rtc_remove),
.shutdown = omap_rtc_shutdown,
.driver = {
@@ -725,7 +767,7 @@ static struct platform_driver omap_rtc_driver = {
.id_table = omap_rtc_id_table,
};
-module_platform_driver_probe(omap_rtc_driver, omap_rtc_probe);
+module_platform_driver(omap_rtc_driver);
MODULE_ALIAS("platform:omap_rtc");
MODULE_AUTHOR("George G. Davis (and others)");
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 95f652165fe9..7061dcae2b09 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -16,8 +16,9 @@
* along with this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRVNAME "rtc-opal"
-#define pr_fmt(fmt) DRVNAME ": " fmt
#include <linux/module.h>
#include <linux/err.h>
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 96fb32e7d6f8..0ba7e59929be 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -246,7 +246,6 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
- int err;
unsigned char buf[9];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
@@ -272,12 +271,8 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
- err = pcf8563_write_block_data(client, PCF8563_REG_SC,
+ return pcf8563_write_block_data(client, PCF8563_REG_SC,
9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
- if (err)
- return err;
-
- return 0;
}
#ifdef CONFIG_RTC_INTF_DEV
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f4cf6851fae9..76cbad7a99d3 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -39,7 +39,6 @@ struct s3c_rtc {
void __iomem *base;
struct clk *rtc_clk;
struct clk *rtc_src_clk;
- bool enabled;
struct s3c_rtc_data *data;
@@ -67,26 +66,25 @@ struct s3c_rtc_data {
void (*disable) (struct s3c_rtc *info);
};
-static void s3c_rtc_alarm_clk_enable(struct s3c_rtc *info, bool enable)
+static void s3c_rtc_enable_clk(struct s3c_rtc *info)
{
unsigned long irq_flags;
spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
- if (enable) {
- if (!info->enabled) {
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
- info->enabled = true;
- }
- } else {
- if (info->enabled) {
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
- info->enabled = false;
- }
- }
+ clk_enable(info->rtc_clk);
+ if (info->data->needs_src_clk)
+ clk_enable(info->rtc_src_clk);
+ spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+}
+
+static void s3c_rtc_disable_clk(struct s3c_rtc *info)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+ if (info->data->needs_src_clk)
+ clk_disable(info->rtc_src_clk);
+ clk_disable(info->rtc_clk);
spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
}
@@ -119,20 +117,16 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
dev_dbg(info->dev, "%s: aie=%d\n", __func__, enabled);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
+
tmp = readb(info->base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, info->base + S3C2410_RTCALM);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
- s3c_rtc_alarm_clk_enable(info, enabled);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -143,18 +137,12 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
info->data->set_freq(info, freq);
spin_unlock_irq(&info->pie_lock);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
return 0;
}
@@ -165,9 +153,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
struct s3c_rtc *info = dev_get_drvdata(dev);
unsigned int have_retried = 0;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
retry_get_time:
rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN);
@@ -194,6 +180,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+ s3c_rtc_disable_clk(info);
+
rtc_tm->tm_year += 100;
dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
@@ -202,10 +190,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_mon -= 1;
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return rtc_valid_tm(rtc_tm);
}
@@ -225,9 +209,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
writeb(bin2bcd(tm->tm_sec), info->base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN);
@@ -236,9 +218,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
writeb(bin2bcd(tm->tm_mon + 1), info->base + S3C2410_RTCMON);
writeb(bin2bcd(year), info->base + S3C2410_RTCYEAR);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -249,9 +229,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *alm_tm = &alrm->time;
unsigned int alm_en;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
alm_tm->tm_sec = readb(info->base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(info->base + S3C2410_ALMMIN);
@@ -262,6 +240,8 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alm_en = readb(info->base + S3C2410_RTCALM);
+ s3c_rtc_disable_clk(info);
+
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
@@ -269,9 +249,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
-
/* decode the alarm enable field */
-
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
else
@@ -304,10 +282,6 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
else
alm_tm->tm_year = -1;
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
@@ -317,15 +291,13 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
unsigned int alrm_en;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
+ s3c_rtc_enable_clk(info);
+
alrm_en = readb(info->base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, info->base + S3C2410_RTCALM);
@@ -348,11 +320,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(alrm_en, info->base + S3C2410_RTCALM);
- s3c_rtc_setaie(dev, alrm->enabled);
+ s3c_rtc_disable_clk(info);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_setaie(dev, alrm->enabled);
return 0;
}
@@ -361,16 +331,12 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
if (info->data->enable_tick)
info->data->enable_tick(info, seq);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -388,10 +354,6 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
{
unsigned int con, tmp;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
/* re-enable the device, and check it is ok */
if ((con & S3C2410_RTCCON_RTCEN) == 0) {
@@ -417,20 +379,12 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
writew(tmp & ~S3C2410_RTCCON_CLKRST,
info->base + S3C2410_RTCCON);
}
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static void s3c24xx_rtc_disable(struct s3c_rtc *info)
{
unsigned int con;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
con &= ~S3C2410_RTCCON_RTCEN;
writew(con, info->base + S3C2410_RTCCON);
@@ -438,28 +392,16 @@ static void s3c24xx_rtc_disable(struct s3c_rtc *info)
con = readb(info->base + S3C2410_TICNT);
con &= ~S3C2410_TICNT_ENABLE;
writeb(con, info->base + S3C2410_TICNT);
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static void s3c6410_rtc_disable(struct s3c_rtc *info)
{
unsigned int con;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
con &= ~S3C64XX_RTCCON_TICEN;
con &= ~S3C2410_RTCCON_RTCEN;
writew(con, info->base + S3C2410_RTCCON);
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static int s3c_rtc_remove(struct platform_device *pdev)
@@ -554,6 +496,20 @@ static int s3c_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
+ /* Check RTC Time */
+ if (s3c_rtc_gettime(&pdev->dev, &rtc_tm)) {
+ rtc_tm.tm_year = 100;
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_mday = 1;
+ rtc_tm.tm_hour = 0;
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_sec = 0;
+
+ s3c_rtc_settime(&pdev->dev, &rtc_tm);
+
+ dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
+ }
+
/* register RTC and exit */
info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
THIS_MODULE);
@@ -577,36 +533,21 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- /* Check RTC Time */
- s3c_rtc_gettime(&pdev->dev, &rtc_tm);
-
- if (rtc_valid_tm(&rtc_tm)) {
- rtc_tm.tm_year = 100;
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_mday = 1;
- rtc_tm.tm_hour = 0;
- rtc_tm.tm_min = 0;
- rtc_tm.tm_sec = 0;
-
- s3c_rtc_settime(&pdev->dev, &rtc_tm);
-
- dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
- }
-
if (info->data->select_tick_clk)
info->data->select_tick_clk(info);
s3c_rtc_setfreq(info, 1);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
err_nortc:
if (info->data->disable)
info->data->disable(info);
+
+ if (info->data->needs_src_clk)
+ clk_disable_unprepare(info->rtc_src_clk);
clk_disable_unprepare(info->rtc_clk);
return ret;
@@ -618,9 +559,7 @@ static int s3c_rtc_suspend(struct device *dev)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
/* save TICNT for anyone using periodic interrupts */
if (info->data->save_tick_cnt)
@@ -636,10 +575,6 @@ static int s3c_rtc_suspend(struct device *dev)
dev_err(dev, "enable_irq_wake failed\n");
}
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
@@ -647,25 +582,19 @@ static int s3c_rtc_resume(struct device *dev)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
if (info->data->enable)
info->data->enable(info);
if (info->data->restore_tick_cnt)
info->data->restore_tick_cnt(info);
+ s3c_rtc_disable_clk(info);
+
if (device_may_wakeup(dev) && info->wake_en) {
disable_irq_wake(info->irq_alarm);
info->wake_en = false;
}
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
#endif
@@ -673,29 +602,13 @@ static SIMPLE_DEV_PM_OPS(s3c_rtc_pm_ops, s3c_rtc_suspend, s3c_rtc_resume);
static void s3c24xx_rtc_irq(struct s3c_rtc *info, int mask)
{
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
- s3c_rtc_alarm_clk_enable(info, false);
}
static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
{
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
writeb(mask, info->base + S3C2410_INTP);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
- s3c_rtc_alarm_clk_enable(info, false);
}
static void s3c2410_rtc_setfreq(struct s3c_rtc *info, int freq)
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 4008b84246ca..8c70d785ba73 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
@@ -90,7 +92,7 @@ struct s5m_rtc_info {
struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
- int device_type;
+ enum sec_device_type device_type;
int rtc_24hr_mode;
const struct s5m_rtc_reg_config *regs;
};
@@ -146,7 +148,7 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_err("s5m8767 RTC cannot handle the year %d.\n",
+ pr_err("RTC cannot handle the year %d\n",
1900 + tm->tm_year);
return -EINVAL;
} else {
@@ -187,6 +189,7 @@ static inline int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
val &= S5M_ALARM0_STATUS;
break;
case S2MPS14X:
+ case S2MPS13X:
ret = regmap_read(info->s5m87xx->regmap_pmic, S2MPS14_REG_ST2,
&val);
val &= S2MPS_ALARM0_STATUS;
@@ -252,6 +255,9 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
case S2MPS14X:
data |= S2MPS_RTC_RUDR_MASK;
break;
+ case S2MPS13X:
+ data |= S2MPS13_RTC_AUDR_MASK;
+ break;
default:
return -EINVAL;
}
@@ -265,6 +271,11 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
ret = s5m8767_wait_for_udr_update(info);
+ /* On S2MPS13 the AUDR is not auto-cleared */
+ if (info->device_type == S2MPS13X)
+ regmap_update_bits(info->regmap, info->regs->rtc_udr_update,
+ S2MPS13_RTC_AUDR_MASK, 0);
+
return ret;
}
@@ -306,7 +317,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 data[info->regs->regs_count];
int ret;
- if (info->device_type == S2MPS14X) {
+ if (info->device_type == S2MPS14X || info->device_type == S2MPS13X) {
ret = regmap_update_bits(info->regmap,
info->regs->rtc_udr_update,
S2MPS_RTC_RUDR_MASK, S2MPS_RTC_RUDR_MASK);
@@ -329,6 +340,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
break;
@@ -355,6 +367,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
break;
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
ret = s5m8767_tm_to_data(tm, data);
break;
default:
@@ -402,6 +415,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
alrm->enabled = 0;
for (i = 0; i < info->regs->regs_count; i++) {
@@ -450,6 +464,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
for (i = 0; i < info->regs->regs_count; i++)
data[i] &= ~ALARM_ENABLE_MASK;
@@ -494,6 +509,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
data[RTC_SEC] |= ALARM_ENABLE_MASK;
data[RTC_MIN] |= ALARM_ENABLE_MASK;
data[RTC_HOUR] |= ALARM_ENABLE_MASK;
@@ -533,6 +549,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_tm_to_data(&alrm->time, data);
break;
@@ -615,6 +632,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
break;
case S2MPS14X:
+ case S2MPS13X:
data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
break;
@@ -650,8 +668,9 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- switch (pdata->device_type) {
+ switch (platform_get_device_id(pdev)->driver_data) {
case S2MPS14X:
+ case S2MPS13X:
regmap_cfg = &s2mps14_rtc_regmap_config;
info->regs = &s2mps_rtc_regs;
alarm_irq = S2MPS14_IRQ_RTCA0;
@@ -667,7 +686,9 @@ static int s5m_rtc_probe(struct platform_device *pdev)
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
- dev_err(&pdev->dev, "Device type is not supported by RTC driver\n");
+ dev_err(&pdev->dev,
+ "Device type %lu is not supported by RTC driver\n",
+ platform_get_device_id(pdev)->driver_data);
return -ENODEV;
}
@@ -687,7 +708,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->device_type = s5m87xx->device_type;
+ info->device_type = platform_get_device_id(pdev)->driver_data;
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
@@ -772,6 +793,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
+ { "s2mps13-rtc", S2MPS13X },
{ "s2mps14-rtc", S2MPS14X },
{ },
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 2939cdcb2688..eb09eddf39b8 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -42,6 +42,8 @@
#define STMP3XXX_RTC_STAT 0x10
#define STMP3XXX_RTC_STAT_STALE_SHIFT 16
#define STMP3XXX_RTC_STAT_RTC_PRESENT 0x80000000
+#define STMP3XXX_RTC_STAT_XTAL32000_PRESENT 0x10000000
+#define STMP3XXX_RTC_STAT_XTAL32768_PRESENT 0x08000000
#define STMP3XXX_RTC_SECONDS 0x30
@@ -52,9 +54,13 @@
#define STMP3XXX_RTC_PERSISTENT0 0x60
#define STMP3XXX_RTC_PERSISTENT0_SET 0x64
#define STMP3XXX_RTC_PERSISTENT0_CLR 0x68
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN 0x00000002
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN 0x00000004
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE 0x00000080
+#define STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE (1 << 0)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN (1 << 1)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN (1 << 2)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP (1 << 4)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP (1 << 5)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ (1 << 6)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE (1 << 7)
#define STMP3XXX_RTC_PERSISTENT1 0x70
/* missing bitmask in headers */
@@ -248,6 +254,9 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
{
struct stmp3xxx_rtc_data *rtc_data;
struct resource *r;
+ u32 rtc_stat;
+ u32 pers0_set, pers0_clr;
+ u32 crystalfreq = 0;
int err;
rtc_data = devm_kzalloc(&pdev->dev, sizeof(*rtc_data), GFP_KERNEL);
@@ -268,8 +277,8 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
rtc_data->irq_alarm = platform_get_irq(pdev, 0);
- if (!(readl(STMP3XXX_RTC_STAT + rtc_data->io) &
- STMP3XXX_RTC_STAT_RTC_PRESENT)) {
+ rtc_stat = readl(rtc_data->io + STMP3XXX_RTC_STAT);
+ if (!(rtc_stat & STMP3XXX_RTC_STAT_RTC_PRESENT)) {
dev_err(&pdev->dev, "no device onboard\n");
return -ENODEV;
}
@@ -282,9 +291,54 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
return err;
}
+ /*
+ * Obviously the rtc needs a clock input to be able to run.
+ * This clock can be provided by an external 32k crystal. If that one is
+ * missing, XTAL must not be disabled in suspend, which consumes a
+ * lot of power. Normally the presence and exact frequency (supported
+ * are 32000 Hz and 32768 Hz) is detectable from fuses, but as reality
+ * proves these fuses are not blown correctly on all machines, so the
+ * frequency can be overridden in the device tree.
+ */
+ if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32000_PRESENT)
+ crystalfreq = 32000;
+ else if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32768_PRESENT)
+ crystalfreq = 32768;
+
+ of_property_read_u32(pdev->dev.of_node, "stmp,crystal-freq",
+ &crystalfreq);
+
+ switch (crystalfreq) {
+ case 32000:
+ /* keep 32kHz crystal running in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ |
+ STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
+ break;
+ case 32768:
+ /* keep 32.768kHz crystal running in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ;
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "invalid crystal-freq specified in device-tree. Assuming no crystal\n");
+ /* fall-through */
+ case 0:
+ /* keep XTAL on in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ }
+
+ writel(pers0_set, rtc_data->io + STMP3XXX_RTC_PERSISTENT0_SET);
+
writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
- STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE | pers0_clr,
rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
writel(STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN |
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 5baea3f54926..2dc787dc06c1 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -18,6 +18,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -145,8 +147,7 @@ static int twl_rtc_read_u8(u8 *data, u8 reg)
ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl_rtc: Could not read TWL"
- "register %X - error %d\n", reg, ret);
+ pr_err("Could not read TWL register %X - error %d\n", reg, ret);
return ret;
}
@@ -159,8 +160,8 @@ static int twl_rtc_write_u8(u8 data, u8 reg)
ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl_rtc: Could not write TWL"
- "register %X - error %d\n", reg, ret);
+ pr_err("Could not write TWL register %X - error %d\n",
+ reg, ret);
return ret;
}
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index b1de58e0b3d0..5638b7ba8b06 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -22,6 +22,7 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/bitops.h>
#define DRV_VERSION "1.0.8"
@@ -366,8 +367,7 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
* perform sign extension. The formula is
* Catr = (atr * 0.25pF) + 11.00pF.
*/
- if (atr & 0x20)
- atr |= 0xC0;
+ atr = sign_extend32(atr, 5);
dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8981701802ca..a777e5c412df 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -474,11 +474,11 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
*/
#ifndef USLEEP_SLEEP
/* 20 ms (reasonable hard disk speed) */
-#define USLEEP_SLEEP (20*HZ/1000)
+#define USLEEP_SLEEP msecs_to_jiffies(20)
#endif
/* 300 RPM (floppy speed) */
#ifndef USLEEP_POLL
-#define USLEEP_POLL (200*HZ/1000)
+#define USLEEP_POLL msecs_to_jiffies(200)
#endif
#ifndef USLEEP_WAITLONG
/* RvC: (reasonable time to wait on select error) */
@@ -576,7 +576,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
probe_irq = NO_IRQ;
/*
@@ -634,7 +634,7 @@ static void prepare_info(struct Scsi_Host *instance)
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
- "USLEEP_POLL %d, USLEEP_WAITLONG %d, "
+ "USLEEP_POLL %lu, USLEEP_WAITLONG %lu, "
#endif
"options { %s} ",
instance->hostt->name, instance->io_port, instance->n_io_port,
@@ -1346,7 +1346,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index b32e77db0c48..9b3dd6ef6a0b 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -111,6 +111,41 @@
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ u8 data_length;
+ u8 med_type;
+ u8 dev_par;
+ u8 bd_length;
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep_data;
+
+/* MODE_SENSE_10 data format */
+typedef struct {
+ struct {
+ u8 data_length[2];
+ u8 med_type;
+ u8 dev_par;
+ u8 rsrvd[2];
+ u8 bd_length[2];
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep10_data;
+
/*------------------------------------------------------------------------------
* S T R U C T S / T Y P E D E F S
*----------------------------------------------------------------------------*/
@@ -128,6 +163,48 @@ struct inquiry_data {
u8 inqd_prl[4]; /* Product Revision Level */
};
+/* Added for VPD 0x83 */
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ u8 VendId[8];
+ u8 ProductId[16];
+ u8 SerialNumber[8]; /* SN in ASCII */
+
+} TVPD_ID_Descriptor_Type_1;
+
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ struct TEU64Id {
+ u32 Serial;
+ /* The serial number is supposed to be 40 bits,
+ * but we only support 32, so make the last byte zero. */
+ u8 Reserved;
+ u8 VendId[3];
+ } EU64Id;
+
+} TVPD_ID_Descriptor_Type_2;
+
+typedef struct {
+ u8 DeviceType:5;
+ u8 DeviceTypeQualifier:3;
+ u8 PageCode;
+ u8 Reserved;
+ u8 PageLength;
+ TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
+ TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
+
+} TVPD_Page83;
+
/*
* M O D U L E G L O B A L S
*/
@@ -385,6 +462,11 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_SUPPORTED_240_VOLUMES) {
+ maximum_num_containers =
+ le32_to_cpu(dresp->MaxSimpleVolumes);
+ }
aac_fib_complete(fibptr);
}
/* FIB should be freed only after getting the response from the F/W */
@@ -438,7 +520,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
char *sp = get_name_reply->data;
- sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
+ sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
while (*sp == ' ')
++sp;
if (*sp) {
@@ -539,6 +621,14 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
+ dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
+ fsa_dev_ptr->block_size = 0x200;
+ } else {
+ fsa_dev_ptr->block_size =
+ le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
+ }
fsa_dev_ptr->valid = 1;
/* sense_key holds the current state of the spin-up */
if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@@ -571,7 +661,9 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
- dresp->mnt[0].capacityhigh = 0;
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE))
+ dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
_aac_probe_container2(context, fibptr);
@@ -586,7 +678,12 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- dinfo->command = cpu_to_le32(VM_NameServe64);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
@@ -621,7 +718,12 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- dinfo->command = cpu_to_le32(VM_NameServe);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe);
+
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
scsicmd->SCp.ptr = (char *)callback;
@@ -835,14 +937,88 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
- char sp[13];
- /* EVPD bit set */
- sp[0] = INQD_PDT_DA;
- sp[1] = scsicmd->cmnd[2];
- sp[2] = 0;
- sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
- le32_to_cpu(get_serial_reply->uid));
- scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
+ /* Check to see if it's for VPD 0x83 or 0x80 */
+ if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ int i;
+ TVPD_Page83 VPDPage83Data;
+
+ memset(((u8 *)&VPDPage83Data), 0,
+ sizeof(VPDPage83Data));
+
+ /* DIRECT_ACCESS_DEVICE */
+ VPDPage83Data.DeviceType = 0;
+ /* DEVICE_CONNECTED */
+ VPDPage83Data.DeviceTypeQualifier = 0;
+ /* VPD_DEVICE_IDENTIFIERS */
+ VPDPage83Data.PageCode = 0x83;
+ VPDPage83Data.Reserved = 0;
+ VPDPage83Data.PageLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) +
+ sizeof(VPDPage83Data.IdDescriptorType2);
+
+ /* T10 Vendor Identifier Field Format */
+ /* VpdCodeSetAscii */
+ VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+ /* VpdIdentifierTypeVendorId */
+ VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
+ VPDPage83Data.IdDescriptorType1.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+
+ /* "ADAPTEC " for adaptec */
+ memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+ "ADAPTEC ",
+ sizeof(VPDPage83Data.IdDescriptorType1.VendId));
+ memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+ "ARRAY ",
+ sizeof(
+ VPDPage83Data.IdDescriptorType1.ProductId));
+
+ /* Convert to ASCII-based serial number.
+ * The LSB is at the end.
+ */
+ for (i = 0; i < 8; i++) {
+ u8 temp =
+ (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
+ if (temp > 0x9) {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ 'A' + (temp - 0xA);
+ } else {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ '0' + temp;
+ }
+ }
+
+ /* VpdCodeSetBinary */
+ VPDPage83Data.IdDescriptorType2.CodeSet = 1;
+ /* VpdIdentifierTypeEUI64 */
+ VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
+ VPDPage83Data.IdDescriptorType2.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+ get_serial_reply->uid;
+ VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+
+ /* Move the inquiry data to the response buffer. */
+ scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
+ sizeof(VPDPage83Data));
+ } else {
+ /* It must be for VPD 0x80 */
+ char sp[13];
+ /* EVPD bit set */
+ sp[0] = INQD_PDT_DA;
+ sp[1] = scsicmd->cmnd[2];
+ sp[2] = 0;
+ sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ le32_to_cpu(get_serial_reply->uid));
+ scsi_sg_copy_from_buffer(scsicmd, sp,
+ sizeof(sp));
+ }
}
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
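The serial-number loop in the new VPD 0x83 branch writes the 32-bit uid as eight ASCII hex digits, most significant nibble first, which is exactly what the existing %08X path on page 0x80 produces. A stand-alone check of that equivalence (the uid value is invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t uid = 0x12AB34CD;		/* made-up adapter uid */
	char loop[9] = "", fmt[9] = "";
	int i;

	/* Same nibble walk as the driver, MSB first. */
	for (i = 0; i < 8; i++) {
		uint8_t t = (uid >> ((7 - i) * 4)) & 0xF;

		loop[i] = (t > 0x9) ? 'A' + (t - 0xA) : '0' + t;
	}
	snprintf(fmt, sizeof(fmt), "%08X", uid);
	assert(strcmp(loop, fmt) == 0);		/* both are "12AB34CD" */
	printf("%s\n", loop);
	return 0;
}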
@@ -982,7 +1158,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
memset(readcmd2, 0, sizeof(struct aac_raw_io2));
readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- readcmd2->byteCount = cpu_to_le32(count<<9);
+ readcmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
ret = aac_build_sgraw2(cmd, readcmd2,
@@ -997,7 +1174,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd = (struct aac_raw_io *) fib_data(fib);
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- readcmd->count = cpu_to_le32(count<<9);
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
@@ -1062,6 +1240,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
+ struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@@ -1069,7 +1248,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(scmd_id(cmd));
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
- readcmd->count = cpu_to_le32(count * 512);
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
ret = aac_build_sg(cmd, &readcmd->sg);
if (ret < 0)
@@ -1104,7 +1284,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
memset(writecmd2, 0, sizeof(struct aac_raw_io2));
writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- writecmd2->byteCount = cpu_to_le32(count<<9);
+ writecmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd2->cid = cpu_to_le16(scmd_id(cmd));
writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@@ -1122,7 +1303,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd = (struct aac_raw_io *) fib_data(fib);
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- writecmd->count = cpu_to_le32(count<<9);
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@@ -1190,6 +1372,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
+ struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@@ -1197,7 +1380,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(scmd_id(cmd));
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
- writecmd->count = cpu_to_le32(count * 512);
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
@@ -2246,9 +2430,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
INQD_PDT_PROC : INQD_PDT_DA;
if (scsicmd->cmnd[2] == 0) {
/* supported vital product data pages */
- arr[3] = 2;
+ arr[3] = 3;
arr[4] = 0x0;
arr[5] = 0x80;
+ arr[6] = 0x83;
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
@@ -2264,7 +2449,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- /* SLES 10 SP1 special */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ char *sno = (char *)&inq_data;
+ sno[3] = setinqserial(dev, &sno[4],
+ scmd_id(scsicmd));
+ if (aac_wwn != 2)
+ return aac_get_container_serial(
+ scsicmd);
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
} else {
@@ -2329,10 +2523,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[5] = (capacity >> 16) & 0xff;
cp[6] = (capacity >> 8) & 0xff;
cp[7] = (capacity >> 0) & 0xff;
- cp[8] = 0;
- cp[9] = 0;
- cp[10] = 2;
- cp[11] = 0;
+ cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
cp[12] = 0;
alloc_len = ((scsicmd->cmnd[10] << 24)
@@ -2369,10 +2563,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff;
cp[3] = (capacity >> 0) & 0xff;
- cp[4] = 0;
- cp[5] = 0;
- cp[6] = 2;
- cp[7] = 0;
+ cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
@@ -2385,30 +2579,79 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
- char mode_buf[7];
int mode_buf_length = 4;
+ u32 capacity;
+ aac_modep_data mpd;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
- mode_buf[0] = 3; /* Mode data length */
- mode_buf[1] = 0; /* Medium type - default */
- mode_buf[2] = 0; /* Device-specific param,
- bit 8: 0/1 = write enabled/protected
- bit 4: 0/1 = FUA enabled */
+ memset((char *)&mpd, 0, sizeof(aac_modep_data));
+
+ /* Mode data length */
+ mpd.hd.data_length = sizeof(mpd.hd) - 1;
+ /* Medium type - default */
+ mpd.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd.hd.dev_par = 0;
+
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
- mode_buf[2] = 0x10;
- mode_buf[3] = 0; /* Block descriptor length */
+ mpd.hd.dev_par = 0x10;
+ if (scsicmd->cmnd[1] & 0x8)
+ mpd.hd.bd_length = 0; /* Block descriptor length */
+ else {
+ mpd.hd.bd_length = sizeof(mpd.bd);
+ mpd.hd.data_length += mpd.hd.bd_length;
+ mpd.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ mpd.mpc_buf[0] = scsicmd->cmnd[2];
+ if (scsicmd->cmnd[2] == 0x1C) {
+ /* page length */
+ mpd.mpc_buf[1] = 0xa;
+ /* Mode data length */
+ mpd.hd.data_length = 23;
+ } else {
+ /* Mode data length */
+ mpd.hd.data_length = 15;
+ }
+
+ if (capacity > 0xffffff) {
+ mpd.bd.block_count[0] = 0xff;
+ mpd.bd.block_count[1] = 0xff;
+ mpd.bd.block_count[2] = 0xff;
+ } else {
+ mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
+ mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
+ mpd.bd.block_count[2] = capacity & 0xff;
+ }
+ }
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
- mode_buf[0] = 6;
- mode_buf[4] = 8;
- mode_buf[5] = 1;
- mode_buf[6] = ((aac_cache & 6) == 2)
+ mpd.hd.data_length += 3;
+ mpd.mpc_buf[0] = 8;
+ mpd.mpc_buf[1] = 1;
+ mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
- mode_buf_length = 7;
- if (mode_buf_length > scsicmd->cmnd[4])
- mode_buf_length = scsicmd->cmnd[4];
+ mode_buf_length = sizeof(mpd);
}
- scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
+
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ else
+ mode_buf_length = sizeof(mpd);
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd,
+ mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
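The reworked MODE SENSE path stores the container's block size and (clamped) block count as 3-byte big-endian fields in the block descriptor; the MODE SENSE(10) case below packs the same values the same way. A hedged sketch of just that packing — put_be24 is an illustrative helper, not a driver function, and the numbers are made up:

#include <stdint.h>
#include <stdio.h>

static void put_be24(uint8_t dst[3], uint32_t v)
{
	if (v > 0xffffff)
		v = 0xffffff;			/* clamp, as done for block_count */
	dst[0] = (v >> 16) & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = v & 0xff;
}

int main(void)
{
	uint8_t count[3], len[3];

	put_be24(count, 0x12345678);		/* oversized capacity -> ff ff ff */
	put_be24(len, 4096);			/* 4K-sector container -> 00 10 00 */
	printf("count=%02x%02x%02x len=%02x%02x%02x\n",
	       count[0], count[1], count[2], len[0], len[1], len[2]);
	return 0;
}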
@@ -2416,34 +2659,77 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
- char mode_buf[11];
+ u32 capacity;
int mode_buf_length = 8;
+ aac_modep10_data mpd10;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
- mode_buf[0] = 0; /* Mode data length (MSB) */
- mode_buf[1] = 6; /* Mode data length (LSB) */
- mode_buf[2] = 0; /* Medium type - default */
- mode_buf[3] = 0; /* Device-specific param,
- bit 8: 0/1 = write enabled/protected
- bit 4: 0/1 = FUA enabled */
+ memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
+ /* Mode data length (MSB) */
+ mpd10.hd.data_length[0] = 0;
+ /* Mode data length (LSB) */
+ mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
+ /* Medium type - default */
+ mpd10.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd10.hd.dev_par = 0;
+
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
- mode_buf[3] = 0x10;
- mode_buf[4] = 0; /* reserved */
- mode_buf[5] = 0; /* reserved */
- mode_buf[6] = 0; /* Block descriptor length (MSB) */
- mode_buf[7] = 0; /* Block descriptor length (LSB) */
+ mpd10.hd.dev_par = 0x10;
+ mpd10.hd.rsrvd[0] = 0; /* reserved */
+ mpd10.hd.rsrvd[1] = 0; /* reserved */
+ if (scsicmd->cmnd[1] & 0x8) {
+ /* Block descriptor length (MSB) */
+ mpd10.hd.bd_length[0] = 0;
+ /* Block descriptor length (LSB) */
+ mpd10.hd.bd_length[1] = 0;
+ } else {
+ mpd10.hd.bd_length[0] = 0;
+ mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
+
+ mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
+
+ mpd10.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd10.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd10.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ if (capacity > 0xffffff) {
+ mpd10.bd.block_count[0] = 0xff;
+ mpd10.bd.block_count[1] = 0xff;
+ mpd10.bd.block_count[2] = 0xff;
+ } else {
+ mpd10.bd.block_count[0] =
+ (capacity >> 16) & 0xff;
+ mpd10.bd.block_count[1] =
+ (capacity >> 8) & 0xff;
+ mpd10.bd.block_count[2] =
+ capacity & 0xff;
+ }
+ }
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
- mode_buf[1] = 9;
- mode_buf[8] = 8;
- mode_buf[9] = 1;
- mode_buf[10] = ((aac_cache & 6) == 2)
+ mpd10.hd.data_length[1] += 3;
+ mpd10.mpc_buf[0] = 8;
+ mpd10.mpc_buf[1] = 1;
+ mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
- mode_buf_length = 11;
+ mode_buf_length = sizeof(mpd10);
if (mode_buf_length > scsicmd->cmnd[8])
mode_buf_length = scsicmd->cmnd[8];
}
- scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd10,
+ mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index eaaf8705a5f4..40fe65c91b41 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -6,13 +6,63 @@
#define nblank(x) _nblank(x)[0]
#include <linux/interrupt.h>
+#include <linux/pci.h>
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
+#define AAC_MAX_MSIX 8 /* vectors */
+#define AAC_PCI_MSI_ENABLE 0x8000
+
+enum {
+ AAC_ENABLE_INTERRUPT = 0x0,
+ AAC_DISABLE_INTERRUPT,
+ AAC_ENABLE_MSIX,
+ AAC_DISABLE_MSIX,
+ AAC_CLEAR_AIF_BIT,
+ AAC_CLEAR_SYNC_BIT,
+ AAC_ENABLE_INTX
+};
+
+#define AAC_INT_MODE_INTX (1<<0)
+#define AAC_INT_MODE_MSI (1<<1)
+#define AAC_INT_MODE_AIF (1<<2)
+#define AAC_INT_MODE_SYNC (1<<3)
+
+#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb
+#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa
+#define AAC_INT_DISABLE_ALL 0xffffffff
+
+/* Bit definitions in IOA->Host Interrupt Register */
+#define PMC_TRANSITION_TO_OPERATIONAL (1<<31)
+#define PMC_IOARCB_TRANSFER_FAILED (1<<28)
+#define PMC_IOA_UNIT_CHECK (1<<27)
+#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26)
+#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25)
+#define PMC_IOARRIN_LOST (1<<4)
+#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3)
+#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2)
+#define PMC_HOST_RRQ_VALID (1<<1)
+#define PMC_OPERATIONAL_STATUS (1<<31)
+#define PMC_ALLOW_MSIX_VECTOR0 (1<<0)
+
+#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \
+ PMC_IOA_UNIT_CHECK | \
+ PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \
+ PMC_IOARRIN_LOST | \
+ PMC_SYSTEM_BUS_MMIO_ERROR | \
+ PMC_IOA_PROCESSOR_IN_ERROR_STATE)
+
+#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \
+ PMC_HOST_RRQ_VALID | \
+ PMC_TRANSITION_TO_OPERATIONAL | \
+ PMC_ALLOW_MSIX_VECTOR0)
+#define PMC_GLOBAL_INT_BIT2 0x00000004
+#define PMC_GLOBAL_INT_BIT0 0x00000001
+
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30300
+# define AAC_DRIVER_BUILD 40709
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -36,6 +86,7 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
#define PMC_DEVICE_S9 0x28f
@@ -434,7 +485,7 @@ enum fib_xfer_state {
struct aac_init
{
__le32 InitStructRevision;
- __le32 MiniPortRevision;
+ __le32 Sa_MSIXVectors;
__le32 fsrev;
__le32 CommHeaderAddress;
__le32 FastIoCommAreaAddress;
@@ -582,7 +633,8 @@ struct aac_queue {
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
- u32 numpending; /* Number of entries on outstanding queue. */
+ /* Number of entries on outstanding queue. */
+ atomic_t numpending;
struct aac_dev * dev; /* Back pointer to adapter structure */
};
@@ -755,7 +807,8 @@ struct rkt_registers {
struct src_mu_registers {
/* PCI*| Name */
- __le32 reserved0[8]; /* 00h | Reserved */
+ __le32 reserved0[6]; /* 00h | Reserved */
+ __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
__le32 IDR; /* 20h | Inbound Doorbell Register */
__le32 IISR; /* 24h | Inbound Int. Status Register */
__le32 reserved1[3]; /* 28h | Reserved */
@@ -767,17 +820,18 @@ struct src_mu_registers {
__le32 OMR; /* bch | Outbound Message Register */
__le32 IQ_L; /* c0h | Inbound Queue (Low address) */
__le32 IQ_H; /* c4h | Inbound Queue (High address) */
+ __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
};
struct src_registers {
- struct src_mu_registers MUnit; /* 00h - c7h */
+ struct src_mu_registers MUnit; /* 00h - cbh */
union {
struct {
- __le32 reserved1[130790]; /* c8h - 7fc5fh */
+ __le32 reserved1[130789]; /* cch - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
} tupelo;
struct {
- __le32 reserved1[974]; /* c8h - fffh */
+ __le32 reserved1[973]; /* cch - fffh */
struct src_inbound IndexRegs; /* 1000h */
} denali;
} u;
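The MU register map above trades two dwords of reserved0 for the new IOAR window and appends ODR_MSI at c8h, which is why both reserved1 pads in src_registers shrink by one dword. A compile-time sanity check on a truncated model of the leading fields (src_mu_head_model is illustrative only, not the driver struct):

#include <stddef.h>
#include <stdint.h>

typedef uint32_t __le32;

struct src_mu_head_model {			/* leading fields only */
	__le32 reserved0[6];			/* 00h */
	__le32 IOAR[2];				/* 18h */
	__le32 IDR;				/* 20h */
	__le32 IISR;				/* 24h */
};

_Static_assert(offsetof(struct src_mu_head_model, IOAR) == 0x18, "IOAR at 18h");
_Static_assert(offsetof(struct src_mu_head_model, IDR) == 0x20, "IDR stays at 20h");

int main(void) { return 0; }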
@@ -857,6 +911,7 @@ struct fsa_dev_info {
u8 deleted;
char devname[8];
struct sense_data sense_data;
+ u32 block_size;
};
struct fib {
@@ -960,6 +1015,10 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
+/* 4KB sector size */
+#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
+/* 240 simple volume support */
+#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -1026,6 +1085,11 @@ struct aac_bus_info_response {
#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+/* MSIX context */
+struct aac_msix_ctx {
+ int vector_no;
+ struct aac_dev *dev;
+};
struct aac_dev
{
@@ -1081,8 +1145,10 @@ struct aac_dev
* if AAC_COMM_MESSAGE_TYPE1 */
dma_addr_t host_rrq_pa; /* phys. address */
- u32 host_rrq_idx; /* index into rrq buffer */
-
+ /* index into rrq buffer */
+ u32 host_rrq_idx[AAC_MAX_MSIX];
+ atomic_t rrq_outstanding[AAC_MAX_MSIX];
+ u32 fibs_pushed_no;
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@@ -1151,6 +1217,13 @@ struct aac_dev
int sync_mode;
struct fib *sync_fib;
struct list_head sync_fib_list;
+ u32 doorbell_mask;
+ u32 max_msix; /* max. MSI-X vectors */
+ u32 vector_cap; /* MSI-X vector capab.*/
+ int msi_enabled; /* MSI/MSI-X enabled */
+ struct msix_entry msixentry[AAC_MAX_MSIX];
+ struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
+ u8 adapter_shutdown;
};
#define aac_adapter_interrupt(dev) \
@@ -1589,6 +1662,7 @@ struct aac_srb_reply
#define VM_CtHostWrite64 20
#define VM_DrvErrTblLog 21
#define VM_NameServe64 22
+#define VM_NameServeAllBlk 30
#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
@@ -1611,8 +1685,13 @@ struct aac_fsinfo {
__le32 fsInodeDensity;
}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+struct aac_blockdevinfo {
+ __le32 block_size;
+};
+
union aac_contentinfo {
- struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+ struct aac_fsinfo filesys;
+ struct aac_blockdevinfo bdevinfo;
};
/*
@@ -1677,6 +1756,7 @@ struct aac_get_container_count_resp {
__le32 MaxContainers;
__le32 ContainerSwitchEntries;
__le32 MaxPartitions;
+ __le32 MaxSimpleVolumes;
};
@@ -1951,6 +2031,8 @@ extern struct aac_common aac_config;
#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
#define EM_DRIVE_INSERTION 31
#define EM_DRIVE_REMOVAL 32
+#define EM_SES_DRIVE_INSERTION 33
+#define EM_SES_DRIVE_REMOVAL 26
#define AifEnBatteryEvent 14 /* Change in Battery State */
#define AifEnAddContainer 15 /* A new array was created */
#define AifEnDeleteContainer 16 /* A container was deleted */
@@ -1983,6 +2065,9 @@ extern struct aac_common aac_config;
/* PMC NEW COMM: Request the event data */
#define AifReqEvent 200
+/* RAW device deleted */
+#define AifRawDeviceRemove 203
+
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
@@ -2025,6 +2110,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int aac_fib_complete(struct fib * context);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+void aac_src_access_devreg(struct aac_dev *dev, int mode);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index fbcd48d0bfc3..54195a117f72 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -689,7 +689,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
- psg->count = cpu_to_le32(sg_indx+1);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
@@ -775,7 +778,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
}
srbcmd->count = cpu_to_le32(byte_count);
- psg->count = cpu_to_le32(sg_indx+1);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 177b094c7792..45db84ad322f 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -43,6 +43,8 @@
#include "aacraid.h"
+static void aac_define_int_mode(struct aac_dev *dev);
+
struct aac_common aac_config = {
.irq_mod = 1
};
@@ -51,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
{
unsigned char *base;
unsigned long size, align;
- const unsigned long fibsize = 4096;
+ const unsigned long fibsize = dev->max_fib_size;
const unsigned long printfbufsiz = 256;
unsigned long host_rrq_size = 0;
struct aac_init *init;
@@ -91,7 +93,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
if (dev->max_fib_size != sizeof(struct hw_fib))
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
- init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
+ init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
init->fsrev = cpu_to_le32(dev->fsrev);
/*
@@ -140,7 +142,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
- init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */
+ /* number of MSI-X */
+ init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
}
@@ -179,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
- q->numpending = 0;
+ atomic_set(&q->numpending, 0);
q->dev = dev;
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
@@ -228,6 +231,12 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
+ dev->adapter_shutdown = 1;
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) &&
+ dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
return status;
}
@@ -350,8 +359,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->raw_io_interface = dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
- 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+ 0, 0, 0, 0, 0, 0,
+ status+0, status+1, status+2, status+3, NULL)) &&
(status[0] == 0x00000001)) {
+ dev->doorbell_mask = status[3];
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
dev->sync_mode = aac_sync_mode;
@@ -388,6 +399,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
}
+ dev->max_msix = 0;
+ dev->msi_enabled = 0;
+ dev->adapter_shutdown = 0;
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
@@ -461,6 +475,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if (host->can_queue > AAC_NUM_IO_FIB)
host->can_queue = AAC_NUM_IO_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ aac_define_int_mode(dev);
/*
* Ok now init the communication subsystem
*/
@@ -489,4 +508,79 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
return dev;
}
-
+static void aac_define_int_mode(struct aac_dev *dev)
+{
+
+ int i, msi_count;
+
+ msi_count = i = 0;
+ /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
+ if (dev->max_msix == 0 ||
+ dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->sync_mode) {
+ dev->max_msix = 1;
+ dev->vector_cap =
+ dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB;
+ return;
+ }
+
+ msi_count = min(dev->max_msix,
+ (unsigned int)num_online_cpus());
+
+ dev->max_msix = msi_count;
+
+ if (msi_count > AAC_MAX_MSIX)
+ msi_count = AAC_MAX_MSIX;
+
+ for (i = 0; i < msi_count; i++)
+ dev->msixentry[i].entry = i;
+
+ if (msi_count > 1 &&
+ pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+ i = pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count);
+ /* Check how many MSIX vectors are allocated */
+ if (i >= 0) {
+ dev->msi_enabled = 1;
+ if (i) {
+ msi_count = i;
+ if (pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count)) {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+ } else {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled) {
+ msi_count = 1;
+ i = pci_enable_msi(dev->pdev);
+
+ if (!i) {
+ dev->msi_enabled = 1;
+ dev->msi = 1;
+ } else {
+ printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled)
+ dev->max_msix = msi_count = 1;
+ else {
+ if (dev->max_msix > msi_count)
+ dev->max_msix = msi_count;
+ }
+ dev->vector_cap =
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
+ msi_count;
+}
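aac_define_int_mode() relies on the historical pci_enable_msix() convention: 0 means all requested vectors were granted, a positive return is the number the hardware can actually supply (so the call is retried with that count), and a negative return means MSI-X is unavailable and the driver falls back to plain MSI, then INTx. A user-space model of that retry step — fake_pci_enable_msix is a stand-in, and the vector counts are invented:

#include <stdio.h>

/* Returns 0 if the request fits, otherwise the number of vectors available. */
static int fake_pci_enable_msix(int requested, int available)
{
	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int available = 5;			/* pretend hardware limit */
	int msi_count = 8;			/* AAC_MAX_MSIX */
	int ret = fake_pci_enable_msix(msi_count, available);

	if (ret > 0) {				/* partial grant: retry with it */
		msi_count = ret;
		ret = fake_pci_enable_msix(msi_count, available);
	}
	printf("msi_enabled=%d vectors=%d\n", ret == 0, msi_count);
	return 0;
}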
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index cab190af6345..4da574925284 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
void aac_fib_free(struct fib *fibptr)
{
- unsigned long flags, flagsv;
+ unsigned long flags;
- spin_lock_irqsave(&fibptr->event_lock, flagsv);
- if (fibptr->done == 2) {
- spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+ if (fibptr->done == 2)
return;
- }
- spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Queue is full */
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
- qid, q->numpending);
+ qid, atomic_read(&q->numpending));
return 0;
} else {
*entry = q->base + *index;
@@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
struct aac_dev * dev = fibptr->dev;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
- unsigned long qflags;
unsigned long mflags = 0;
unsigned long sflags = 0;
@@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
int blink;
if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
- spin_lock_irqsave(q->lock, qflags);
- q->numpending--;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_dec(&q->numpending);
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
"Usually a result of a PCI interrupt routing problem;\n"
@@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
int aac_fib_complete(struct fib *fibptr)
{
- unsigned long flags;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
@@ -798,12 +790,6 @@ int aac_fib_complete(struct fib *fibptr)
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
- spin_lock_irqsave(&fibptr->event_lock, flags);
- if (fibptr->done == 2) {
- spin_unlock_irqrestore(&fibptr->event_lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&fibptr->event_lock, flags);
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
@@ -868,7 +854,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
* dispatches it to the appropriate routine for handling.
*/
-#define AIF_SNIFF_TIMEOUT (30*HZ)
+#define AIF_SNIFF_TIMEOUT (500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -897,6 +883,39 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifRawDeviceRemove:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
+ break;
+ }
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
+
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(
+ dev->scsi_host_ptr,
+ channel, id, lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
+ break;
/*
* Morph or Expand complete
*/
@@ -1044,6 +1063,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
case EM_DRIVE_INSERTION:
case EM_DRIVE_REMOVAL:
+ case EM_SES_DRIVE_INSERTION:
+ case EM_SES_DRIVE_REMOVAL:
container = le32_to_cpu(
((__le32 *)aifcmd->data)[2]);
if ((container >> 28)) {
@@ -1069,8 +1090,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
channel = aac_phys_to_logical(channel);
device_config_needed =
- (((__le32 *)aifcmd->data)[3]
- == cpu_to_le32(EM_DRIVE_INSERTION)) ?
+ ((((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_DRIVE_INSERTION)) ||
+ (((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
ADD : DELETE;
break;
}
@@ -1247,12 +1270,13 @@ retry_next:
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
- int retval;
+ int retval, i;
struct Scsi_Host *host;
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
int jafo = 0;
+ int cpu;
/*
* Assumptions:
@@ -1315,7 +1339,33 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
- free_irq(aac->pdev->irq, aac);
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ pci_disable_msix(aac->pdev);
+ } else {
+ free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
if (aac->msi)
pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index d81b2810f0f7..da9d9936e995 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
* continue. The caller has already been notified that
* the fib timed out.
*/
- dev->queues->queue[AdapNormCmdQueue].numpending--;
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
spin_unlock_irqrestore(q->lock, flags);
@@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* continue. The caller has already been notified that
* the fib timed out.
*/
- dev->queues->queue[AdapNormCmdQueue].numpending--;
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
aac_fib_complete(fib);
@@ -389,8 +389,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
- fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
- fib->callback(fib->callback_data, fib);
+ if (likely(fib->callback && fib->callback_data)) {
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
+ fib->callback(fib->callback_data, fib);
+ } else {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index fdcdf9f781bc..9eec02733c86 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -56,7 +56,7 @@
#include "aacraid.h"
-#define AAC_DRIVER_VERSION "1.2-0"
+#define AAC_DRIVER_VERSION "1.2-1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = {
* TODO: unify with aac_scsi_cmd().
*/
-static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int aac_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *cmd)
{
- struct Scsi_Host *host = cmd->device->host;
- struct aac_dev *dev = (struct aac_dev *)host->hostdata;
- u32 count = 0;
- cmd->scsi_done = done;
- for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
- struct fib * fib = &dev->fibs[count];
- struct scsi_cmnd * command;
- if (fib->hw_fib_va->header.XferState &&
- ((command = fib->callback_data)) &&
- (command == cmd) &&
- (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
- return 0; /* Already owned by Adapter */
- }
+ int r = 0;
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
- return (aac_scsi_cmd(cmd) ? FAILED : 0);
+ r = (aac_scsi_cmd(cmd) ? FAILED : 0);
+ return r;
}
-static DEF_SCSI_QCMD(aac_queuecommand)
-
/**
* aac_info - Returns the host adapter name
* @shost: Scsi host to report on
@@ -713,7 +701,9 @@ static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret;
- if (!capable(CAP_SYS_RAWIO))
+ struct aac_dev *aac;
+ aac = (struct aac_dev *)file->private_data;
+ if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
return -EPERM;
mutex_lock(&aac_mutex);
ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
@@ -1082,6 +1072,9 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
+ int i;
+ int cpu;
+
if (aac->aif_thread) {
int i;
/* Clear out events first */
@@ -1095,9 +1088,37 @@ static void __aac_shutdown(struct aac_dev * aac)
}
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
- free_irq(aac->pdev->irq, aac);
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ } else {
+ free_irq(aac->pdev->irq,
+ &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
if (aac->msi)
pci_disable_msi(aac->pdev);
+ else if (aac->max_msix > 1)
+ pci_disable_msix(aac->pdev);
}
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 5c6a8703f535..9570612b80ce 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 Index;
unsigned long nointr = 0;
- spin_lock_irqsave(q->lock, qflags);
aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
- q->numpending++;
+ atomic_inc(&q->numpending);
*(q->headers.producer) = cpu_to_le32(Index + 1);
- spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
@@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 Index;
u64 addr;
volatile void __iomem *device;
unsigned long count = 10000000L; /* 50 seconds */
- spin_lock_irqsave(q->lock, qflags);
- q->numpending++;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_inc(&q->numpending);
for(;;) {
Index = rx_readl(dev, MUnit.InboundQueue);
if (unlikely(Index == 0xFFFFFFFFL))
@@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib)
if (likely(Index != 0xFFFFFFFFL))
break;
if (--count == 0) {
- spin_lock_irqsave(q->lock, qflags);
- q->numpending--;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_dec(&q->numpending);
return -ETIMEDOUT;
}
udelay(5);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 9c65aed26212..4596e9dd757c 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -44,98 +44,128 @@
#include "aacraid.h"
-static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+static int aac_src_get_sync_status(struct aac_dev *dev);
+
+irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
- struct aac_dev *dev = dev_id;
+ struct aac_msix_ctx *ctx;
+ struct aac_dev *dev;
unsigned long bellbits, bellbits_shifted;
- int our_interrupt = 0;
- int isFastResponse;
+ int vector_no;
+ int isFastResponse, mode;
u32 index, handle;
- bellbits = src_readl(dev, MUnit.ODR_R);
- if (bellbits & PmDoorBellResponseSent) {
- bellbits = PmDoorBellResponseSent;
- /* handle async. status */
- src_writel(dev, MUnit.ODR_C, bellbits);
- src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
- index = dev->host_rrq_idx;
- for (;;) {
- isFastResponse = 0;
- /* remove toggle bit (31) */
- handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
- /* check fast response bit (30) */
- if (handle & 0x40000000)
- isFastResponse = 1;
- handle &= 0x0000ffff;
- if (handle == 0)
- break;
-
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
-
- dev->host_rrq[index++] = 0;
- if (index == dev->scsi_host_ptr->can_queue +
- AAC_NUM_MGT_FIB)
- index = 0;
- dev->host_rrq_idx = index;
+ ctx = (struct aac_msix_ctx *)dev_id;
+ dev = ctx->dev;
+ vector_no = ctx->vector_no;
+
+ if (dev->msi_enabled) {
+ mode = AAC_INT_MODE_MSI;
+ if (vector_no == 0) {
+ bellbits = src_readl(dev, MUnit.ODR_MSI);
+ if (bellbits & 0x40000)
+ mode |= AAC_INT_MODE_AIF;
+ if (bellbits & 0x1000)
+ mode |= AAC_INT_MODE_SYNC;
}
} else {
- bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
- if (bellbits_shifted & DoorBellAifPending) {
+ mode = AAC_INT_MODE_INTX;
+ bellbits = src_readl(dev, MUnit.ODR_R);
+ if (bellbits & PmDoorBellResponseSent) {
+ bellbits = PmDoorBellResponseSent;
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ } else {
+ bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
- /* handle AIF */
- aac_intr_normal(dev, 0, 2, 0, NULL);
- } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
- unsigned long sflags;
- struct list_head *entry;
- int send_it = 0;
- extern int aac_sync_mode;
+ if (bellbits_shifted & DoorBellAifPending)
+ mode |= AAC_INT_MODE_AIF;
+ else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
+ mode |= AAC_INT_MODE_SYNC;
+ }
+ }
+
+ if (mode & AAC_INT_MODE_SYNC) {
+ unsigned long sflags;
+ struct list_head *entry;
+ int send_it = 0;
+ extern int aac_sync_mode;
+
+ if (!aac_sync_mode && !dev->msi_enabled) {
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
+ }
- if (!aac_sync_mode) {
- src_writel(dev, MUnit.ODR_C, bellbits);
- src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
+ if (dev->sync_fib) {
+ if (dev->sync_fib->callback)
+ dev->sync_fib->callback(dev->sync_fib->callback_data,
+ dev->sync_fib);
+ spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+ if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+ dev->management_fib_count--;
+ up(&dev->sync_fib->event_wait);
}
-
- if (dev->sync_fib) {
- our_interrupt = 1;
- if (dev->sync_fib->callback)
- dev->sync_fib->callback(dev->sync_fib->callback_data,
- dev->sync_fib);
- spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
- if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
- dev->management_fib_count--;
- up(&dev->sync_fib->event_wait);
- }
- spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
- spin_lock_irqsave(&dev->sync_lock, sflags);
- if (!list_empty(&dev->sync_fib_list)) {
- entry = dev->sync_fib_list.next;
- dev->sync_fib = list_entry(entry, struct fib, fiblink);
- list_del(entry);
- send_it = 1;
- } else {
- dev->sync_fib = NULL;
- }
- spin_unlock_irqrestore(&dev->sync_lock, sflags);
- if (send_it) {
- aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
- (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
- NULL, NULL, NULL, NULL, NULL);
- }
+ spin_unlock_irqrestore(&dev->sync_fib->event_lock,
+ sflags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (!list_empty(&dev->sync_fib_list)) {
+ entry = dev->sync_fib_list.next;
+ dev->sync_fib = list_entry(entry,
+ struct fib,
+ fiblink);
+ list_del(entry);
+ send_it = 1;
+ } else {
+ dev->sync_fib = NULL;
+ }
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ if (send_it) {
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)dev->sync_fib->hw_fib_pa,
+ 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
}
}
+ if (!dev->msi_enabled)
+ mode = 0;
+
+ }
+
+ if (mode & AAC_INT_MODE_AIF) {
+ /* handle AIF */
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
+ mode = 0;
}
- if (our_interrupt) {
- return IRQ_HANDLED;
+ if (mode) {
+ index = dev->host_rrq_idx[vector_no];
+
+ for (;;) {
+ isFastResponse = 0;
+ /* remove toggle bit (31) */
+ handle = (dev->host_rrq[index] & 0x7fffffff);
+ /* check fast response bit (30) */
+ if (handle & 0x40000000)
+ isFastResponse = 1;
+ handle &= 0x0000ffff;
+ if (handle == 0)
+ break;
+ if (dev->msi_enabled && dev->max_msix > 1)
+ atomic_dec(&dev->rrq_outstanding[vector_no]);
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ dev->host_rrq[index++] = 0;
+ if (index == (vector_no + 1) * dev->vector_cap)
+ index = vector_no * dev->vector_cap;
+ dev->host_rrq_idx[vector_no] = index;
+ }
+ mode = 0;
}
- return IRQ_NONE;
+
+ return IRQ_HANDLED;
}
/**
@@ -155,7 +185,7 @@ static void aac_src_disable_interrupt(struct aac_dev *dev)
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
- src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
+ aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}
/**
@@ -174,6 +204,7 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
unsigned long start;
+ unsigned long delay;
int ok;
/*
@@ -191,7 +222,10 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Clear the synch command doorbell to start on a clean slate.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
/*
* Disable doorbell interrupts
@@ -213,19 +247,29 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
ok = 0;
start = jiffies;
- /*
- * Wait up to 5 minutes
- */
- while (time_before(jiffies, start+300*HZ)) {
+ if (command == IOP_RESET_ALWAYS) {
+ /* Wait up to 10 sec */
+ delay = 10*HZ;
+ } else {
+ /* Wait up to 5 minutes */
+ delay = 300*HZ;
+ }
+ while (time_before(jiffies, start+delay)) {
udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
/*
* Mon960 will set doorbell0 bit when it has completed the command.
*/
- if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+ if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
/*
* Clear the doorbell.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev,
+ AAC_CLEAR_SYNC_BIT);
+ else
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
ok = 1;
break;
}
@@ -254,11 +298,16 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
*r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
*r4 = readl(&dev->IndexRegs->Mailbox[4]);
-
+ if (command == GET_COMM_PREFERRED_SETTINGS)
+ dev->max_msix =
+ readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
/*
* Clear the synch command doorbell.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
}
/*
@@ -335,9 +384,14 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
static void aac_src_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
+ int i;
/* reset host_rrq_idx first */
- dev->host_rrq_idx = 0;
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->host_rrq_idx[i] = i * dev->vector_cap;
+ atomic_set(&dev->rrq_outstanding[i], 0);
+ }
+ dev->fibs_pushed_no = 0;
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
@@ -390,15 +444,39 @@ static int aac_src_deliver_message(struct fib *fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 fibsize;
dma_addr_t address;
struct aac_fib_xporthdr *pFibX;
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
- spin_lock_irqsave(q->lock, qflags);
- q->numpending++;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_inc(&q->numpending);
+
+ if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ dev->max_msix > 1) {
+ u_int16_t vector_no, first_choice = 0xffff;
+
+ vector_no = dev->fibs_pushed_no % dev->max_msix;
+ do {
+ vector_no += 1;
+ if (vector_no == dev->max_msix)
+ vector_no = 1;
+ if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+ dev->vector_cap)
+ break;
+ if (0xffff == first_choice)
+ first_choice = vector_no;
+ else if (vector_no == first_choice)
+ break;
+ } while (1);
+ if (vector_no == first_choice)
+ vector_no = 0;
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+ if (dev->fibs_pushed_no == 0xffffffff)
+ dev->fibs_pushed_no = 0;
+ else
+ dev->fibs_pushed_no++;
+ fib->hw_fib_va->header.Handle += (vector_no << 16);
+ }
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
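Each MSI-X vector owns a vector_cap-sized slice of the host RRQ (the interrupt handler above wraps its index inside that slice), and aac_src_deliver_message() spreads outbound FIBs over vectors 1..max_msix-1 round-robin, reserving vector 0 (which also carries AIF/sync work) as the fallback when every other slice is at capacity. A user-space model of the selection loop; pick_vector and the capacities are illustrative, not driver code:

#include <stdio.h>

#define MAX_MSIX	8
#define VECTOR_CAP	4			/* made-up per-vector capacity */

static int rrq_outstanding[MAX_MSIX];
static unsigned int fibs_pushed_no;

static unsigned int pick_vector(void)
{
	unsigned int vector_no = fibs_pushed_no % MAX_MSIX;
	unsigned int first_choice = 0xffff;

	do {
		vector_no += 1;
		if (vector_no == MAX_MSIX)
			vector_no = 1;		/* vector 0 is not used here */
		if (rrq_outstanding[vector_no] < VECTOR_CAP)
			break;
		if (first_choice == 0xffff)
			first_choice = vector_no;
		else if (vector_no == first_choice)
			break;			/* every slice is full */
	} while (1);

	if (vector_no == first_choice)
		vector_no = 0;			/* fall back to vector 0 */
	rrq_outstanding[vector_no]++;
	fibs_pushed_no++;
	return vector_no;
}

int main(void)
{
	int i;

	for (i = 0; i < 12; i++)
		printf("fib %2d -> vector %u\n", i, pick_vector());
	return 0;
}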
@@ -498,15 +576,34 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
- if (bled || (var != 0x00000001))
- return -EINVAL;
- if (dev->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_DOORBELL_RESET) {
- src_writel(dev, MUnit.IDR, reset_mask);
+ if ((bled || (var != 0x00000001)) &&
+ !dev->doorbell_mask)
+ return -EINVAL;
+ else if (dev->doorbell_mask) {
+ reset_mask = dev->doorbell_mask;
+ bled = 0;
+ var = 0x00000001;
+ }
+
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ dev->msi_enabled = 0;
msleep(5000); /* Delay 5 seconds */
}
+
+ if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ ssleep(45);
+ } else {
+ src_writel(dev, MUnit.IDR, 0x100);
+ ssleep(45);
+ }
}
if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
@@ -527,7 +624,6 @@ int aac_src_select_comm(struct aac_dev *dev, int comm)
{
switch (comm) {
case AAC_COMM_MESSAGE:
- dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
dev->a_ops.adapter_intr = aac_src_intr_message;
dev->a_ops.adapter_deliver = aac_src_deliver_message;
break;
@@ -625,6 +721,7 @@ int aac_src_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@@ -646,8 +743,11 @@ int aac_src_init(struct aac_dev *dev)
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
@@ -659,6 +759,7 @@ int aac_src_init(struct aac_dev *dev)
dev->dbg_base = pci_resource_start(dev->pdev, 2);
dev->dbg_base_mapped = dev->regs.src.bar1;
dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@@ -688,7 +789,9 @@ int aac_srcv_init(struct aac_dev *dev)
unsigned long status;
int restart = 0;
int instance = dev->id;
+ int i, j;
const char *name = dev->name;
+ int cpu;
dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
dev->a_ops.adapter_comm = aac_src_select_comm;
@@ -784,6 +887,7 @@ int aac_srcv_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@@ -802,18 +906,54 @@ int aac_srcv_init(struct aac_dev *dev)
goto error_iounmap;
if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
goto error_iounmap;
- dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
- if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED, "aacraid", dev) < 0) {
- if (dev->msi)
- pci_disable_msi(dev->pdev);
- printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
- name, instance);
- goto error_iounmap;
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+ if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->aac_msix[i].vector_no = i;
+ dev->aac_msix[i].dev = dev;
+
+ if (request_irq(dev->msixentry[i].vector,
+ dev->a_ops.adapter_intr,
+ 0,
+ "aacraid",
+ &(dev->aac_msix[i]))) {
+ printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
+ name, instance, i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(dev->msixentry[j].vector,
+ &(dev->aac_msix[j]));
+ pci_disable_msix(dev->pdev);
+ goto error_iounmap;
+ }
+ if (irq_set_affinity_hint(
+ dev->msixentry[i].vector,
+ get_cpu_mask(cpu))) {
+ printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
+ name, instance, cpu);
+ }
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ } else {
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED,
+ "aacraid",
+ &(dev->aac_msix[0])) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
}
dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@@ -831,3 +971,93 @@ error_iounmap:
return -1;
}
+void aac_src_access_devreg(struct aac_dev *dev, int mode)
+{
+ u_int32_t val;
+
+ switch (mode) {
+ case AAC_ENABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = (dev->msi_enabled ?
+ AAC_INT_ENABLE_TYPE1_MSIX :
+ AAC_INT_ENABLE_TYPE1_INTX));
+ break;
+
+ case AAC_DISABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = AAC_INT_DISABLE_ALL);
+ break;
+
+ case AAC_ENABLE_MSIX:
+ /* set bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev,
+ MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
+ break;
+
+ case AAC_DISABLE_MSIX:
+ /* reset bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val &= ~0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_AIF_BIT:
+ /* set bit 5 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x20;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_SYNC_BIT:
+ /* set bit 4 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x10;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_ENABLE_INTX:
+ /* set bit 7 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x80;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ src_readl(dev, MUnit.IOAR);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev, MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2)));
+ break;
+
+ default:
+ break;
+ }
+}
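
Every IDR update in aac_src_access_devreg() follows the same read-modify-write pattern, ending with a dummy read so the posted write is flushed before the driver moves on. A minimal sketch of that pattern as a stand-alone helper (the helper name is hypothetical, not part of the driver):

/* Hypothetical helper: set or clear one IDR bit and flush the posted write,
 * mirroring the pattern repeated in aac_src_access_devreg() above. */
static void src_idr_update_bit(struct aac_dev *dev, u32 bit, bool set)
{
	u32 val = src_readl(dev, MUnit.IDR);

	if (set)
		val |= bit;
	else
		val &= ~bit;
	src_writel(dev, MUnit.IDR, val);
	src_readl(dev, MUnit.IDR);	/* dummy read flushes the posted write */
}

With such a helper, AAC_ENABLE_MSIX would amount to setting bit 0x40 and then unmasking through IOAR/OIMR as shown above.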
+
+static int aac_src_get_sync_status(struct aac_dev *dev)
+{
+
+ int val;
+
+ if (dev->msi_enabled)
+ val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
+ else
+ val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
+
+ return val;
+}
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 770c48ddbe5e..ec432763a29a 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -1,28 +1,9 @@
-/* $Id: aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $
- * linux/kernel/aha1542.c
+/*
+ * Driver for Adaptec AHA-1542 SCSI host adapters
*
* Copyright (C) 1992 Tommy Thorn
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
- *
- * Modified by Eric Youngdale
- * Use request_irq and request_dma to help prevent unexpected conflicts
- * Set up on-board DMA controller, such that we do not have to
- * have the bios enabled to use the aha1542.
- * Modified by David Gentzel
- * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus
- * controller).
- * Modified by Matti Aarnio
- * Accept parameters from LILO cmd-line. -- 1-Oct-94
- * Modified by Mike McLagan <mike.mclagan@linux.org>
- * Recognise extended mode on AHA1542CP, different bit than 1542CF
- * 1-Jan-97
- * Modified by Bjorn L. Thordarson and Einar Thor Einarsson
- * Recognize that DMA0 is valid DMA channel -- 13-Jul-98
- * Modified by Chris Faulhaber <jedgar@fxp.org>
- * Added module command-line options
- * 19-Jul-99
- * Modified by Adam Fritzler
- * Added proper detection of the AHA-1640 (MCA, now deleted)
+ * Copyright (C) 2015 Ondrej Zary
*/
#include <linux/module.h>
@@ -30,96 +11,44 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/isapnp.h>
-#include <linux/blkdev.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/slab.h>
-
+#include <linux/io.h>
#include <asm/dma.h>
-#include <asm/io.h>
-
-#include "scsi.h"
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "aha1542.h"
-#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
-
-#include <linux/stat.h>
-
-#ifdef DEBUG
-#define DEB(x) x
-#else
-#define DEB(x)
-#endif
-
-/*
- static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $";
- */
-
-/* The adaptec can be configured for quite a number of addresses, but
- I generally do not want the card poking around at random. We allow
- two addresses - this allows people to use the Adaptec with a Midi
- card, which also used 0x330 -- can be overridden with LILO! */
-
-#define MAXBOARDS 4 /* Increase this and the sizes of the
- arrays below, if you need more.. */
-
-/* Boards 3,4 slots are reserved for ISAPnP scans */
-
-static unsigned int bases[MAXBOARDS] __initdata = {0x330, 0x334, 0, 0};
-
-/* set by aha1542_setup according to the command line; they also may
- be marked __initdata, but require zero initializers then */
-
-static int setup_called[MAXBOARDS];
-static int setup_buson[MAXBOARDS];
-static int setup_busoff[MAXBOARDS];
-static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
+#define MAXBOARDS 4
-/*
- * LILO/Module params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
- *
- * Where: <PORTBASE> is any of the valid AHA addresses:
- * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334
- * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus
- * when transferring data. 1542A power-on default is 11us,
- * valid values are in range: 2..15 (decimal)
- * <BUSOFF> is the time that AHA spends OFF THE BUS after while
- * it is transferring data (not to monopolize the bus).
- * Power-on default is 4us, valid range: 1..64 microseconds.
- * <DMASPEED> Default is jumper selected (1542A: on the J1),
- * but experimenter can alter it with this.
- * Valid values: 5, 6, 7, 8, 10 (MB/s)
- * Factory default is 5 MB/s.
- */
-
-#if defined(MODULE)
-static bool isapnp = 0;
-static int aha1542[] = {0x330, 11, 4, -1};
-module_param_array(aha1542, int, NULL, 0);
+static bool isapnp = 1;
module_param(isapnp, bool, 0);
+MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)");
-static struct isapnp_device_id id_table[] __initdata = {
- {
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1542),
- 0
- },
- {0}
-};
+static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 };
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)");
-MODULE_DEVICE_TABLE(isapnp, id_table);
+/* time AHA spends on the AT-bus during data transfer */
+static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */
+module_param_array(bus_on, int, NULL, 0);
+MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])");
-#else
-static int isapnp = 1;
-#endif
+/* time AHA spends off the bus (not to monopolize it) during data transfer */
+static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */
+module_param_array(bus_off, int, NULL, 0);
+MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])");
+
+/* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */
+static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 };
+module_param_array(dma_speed, int, NULL, 0);
+MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])");
-#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
#define BIOS_TRANSLATION_6432 1 /* Default case these days */
#define BIOS_TRANSLATION_25563 2 /* Big disk case */
@@ -128,134 +57,71 @@ struct aha1542_hostdata {
int bios_translation; /* Mapping bios uses - for compatibility */
int aha1542_last_mbi_used;
int aha1542_last_mbo_used;
- Scsi_Cmnd *SCint[AHA1542_MAILBOXES];
+ struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
struct mailbox mb[2 * AHA1542_MAILBOXES];
struct ccb ccb[AHA1542_MAILBOXES];
};
-#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
-
-static DEFINE_SPINLOCK(aha1542_lock);
-
-
-
-#define WAITnexttimeout 3000000
-
-static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
-static int aha1542_restart(struct Scsi_Host *shost);
-static void aha1542_intr_handle(struct Scsi_Host *shost);
+static inline void aha1542_intr_reset(u16 base)
+{
+ outb(IRST, CONTROL(base));
+}
-#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
+static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout)
+{
+ bool delayed = true;
-#define WAIT(port, mask, allof, noneof) \
- { register int WAITbits; \
- register int WAITtimeout = WAITnexttimeout; \
- while (1) { \
- WAITbits = inb(port) & (mask); \
- if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
- break; \
- if (--WAITtimeout == 0) goto fail; \
- } \
- }
+ if (timeout == 0) {
+ timeout = 3000000;
+ delayed = false;
+ }
-/* Similar to WAIT, except we use the udelay call to regulate the
- amount of time we wait. */
-#define WAITd(port, mask, allof, noneof, timeout) \
- { register int WAITbits; \
- register int WAITtimeout = timeout; \
- while (1) { \
- WAITbits = inb(port) & (mask); \
- if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
- break; \
- mdelay(1); \
- if (--WAITtimeout == 0) goto fail; \
- } \
- }
+ while (1) {
+ u8 bits = inb(port) & mask;
+ if ((bits & allof) == allof && ((bits & noneof) == 0))
+ break;
+ if (delayed)
+ mdelay(1);
+ if (--timeout == 0)
+ return false;
+ }
-static void aha1542_stat(void)
-{
-/* int s = inb(STATUS), i = inb(INTRFLAGS);
- printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */
+ return true;
}
-/* This is a bit complicated, but we need to make sure that an interrupt
- routine does not send something out while we are in the middle of this.
- Fortunately, it is only at boot time that multi-byte messages
- are ever sent. */
-static int aha1542_out(unsigned int base, unchar * cmdp, int len)
+static int aha1542_outb(unsigned int base, u8 val)
{
- unsigned long flags = 0;
- int got_lock;
-
- if (len == 1) {
- got_lock = 0;
- while (1 == 1) {
- WAIT(STATUS(base), CDF, 0, CDF);
- spin_lock_irqsave(&aha1542_lock, flags);
- if (inb(STATUS(base)) & CDF) {
- spin_unlock_irqrestore(&aha1542_lock, flags);
- continue;
- }
- outb(*cmdp, DATA(base));
- spin_unlock_irqrestore(&aha1542_lock, flags);
- return 0;
- }
- } else {
- spin_lock_irqsave(&aha1542_lock, flags);
- got_lock = 1;
- while (len--) {
- WAIT(STATUS(base), CDF, 0, CDF);
- outb(*cmdp++, DATA(base));
- }
- spin_unlock_irqrestore(&aha1542_lock, flags);
- }
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(val, DATA(base));
+
return 0;
-fail:
- if (got_lock)
- spin_unlock_irqrestore(&aha1542_lock, flags);
- printk(KERN_ERR "aha1542_out failed(%d): ", len + 1);
- aha1542_stat();
- return 1;
}
-/* Only used at boot time, so we do not need to worry about latency as much
- here */
-
-static int __init aha1542_in(unsigned int base, unchar * cmdp, int len)
+static int aha1542_out(unsigned int base, u8 *buf, int len)
{
- unsigned long flags;
-
- spin_lock_irqsave(&aha1542_lock, flags);
while (len--) {
- WAIT(STATUS(base), DF, DF, 0);
- *cmdp++ = inb(DATA(base));
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(*buf++, DATA(base));
}
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0))
+ return 1;
+
return 0;
-fail:
- spin_unlock_irqrestore(&aha1542_lock, flags);
- printk(KERN_ERR "aha1542_in failed(%d): ", len + 1);
- aha1542_stat();
- return 1;
}
-/* Similar to aha1542_in, except that we wait a very short period of time.
- We use this if we know the board is alive and awake, but we are not sure
- if the board will respond to the command we are about to send or not */
-static int __init aha1542_in1(unsigned int base, unchar * cmdp, int len)
-{
- unsigned long flags;
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
- spin_lock_irqsave(&aha1542_lock, flags);
+static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout)
+{
while (len--) {
- WAITd(STATUS(base), DF, DF, 0, 100);
- *cmdp++ = inb(DATA(base));
+ if (!wait_mask(STATUS(base), DF, DF, 0, timeout))
+ return 1;
+ *buf++ = inb(DATA(base));
}
- spin_unlock_irqrestore(&aha1542_lock, flags);
return 0;
-fail:
- spin_unlock_irqrestore(&aha1542_lock, flags);
- return 1;
}
static int makecode(unsigned hosterr, unsigned scsierr)
@@ -297,7 +163,9 @@ static int makecode(unsigned hosterr, unsigned scsierr)
case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
length segment or invalid segment list boundaries was received.
A CCB parameter was invalid. */
- DEB(printk("Aha1542: %x %x\n", hosterr, scsierr));
+#ifdef DEBUG
+ printk("Aha1542: %x %x\n", hosterr, scsierr);
+#endif
hosterr = DID_ERROR; /* Couldn't find any better */
break;
@@ -314,106 +182,74 @@ static int makecode(unsigned hosterr, unsigned scsierr)
return scsierr | (hosterr << 16);
}
-static int __init aha1542_test_port(int bse, struct Scsi_Host *shpnt)
+static int aha1542_test_port(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_INQUIRY};
- unchar inquiry_result[4];
- unchar *cmdp;
- int len;
- volatile int debug = 0;
+ u8 inquiry_result[4];
+ int i;
/* Quick and dirty test for presence of the card. */
- if (inb(STATUS(bse)) == 0xff)
+ if (inb(STATUS(sh->io_port)) == 0xff)
return 0;
/* Reset the adapter. I ought to make a hard reset, but it's not really necessary */
- /* DEB(printk("aha1542_test_port called \n")); */
-
/* In case some other card was probing here, reset interrupts */
- aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
- outb(SRST | IRST /*|SCRST */ , CONTROL(bse));
+ outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port));
mdelay(20); /* Wait a little bit for things to settle down. */
- debug = 1;
/* Expect INIT and IDLE, any of the others are bad */
- WAIT(STATUS(bse), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
+ if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0))
+ return 0;
- debug = 2;
/* Shouldn't have generated any interrupts during reset */
- if (inb(INTRFLAGS(bse)) & INTRMASK)
- goto fail;
-
+ if (inb(INTRFLAGS(sh->io_port)) & INTRMASK)
+ return 0;
/* Perform a host adapter inquiry instead so we do not need to set
up the mailboxes ahead of time */
- aha1542_out(bse, inquiry_cmd, 1);
-
- debug = 3;
- len = 4;
- cmdp = &inquiry_result[0];
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
- while (len--) {
- WAIT(STATUS(bse), DF, DF, 0);
- *cmdp++ = inb(DATA(bse));
+ for (i = 0; i < 4; i++) {
+ if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
+ return 0;
+ inquiry_result[i] = inb(DATA(sh->io_port));
}
- debug = 8;
/* Reading port should reset DF */
- if (inb(STATUS(bse)) & DF)
- goto fail;
+ if (inb(STATUS(sh->io_port)) & DF)
+ return 0;
- debug = 9;
 /* When HACC, command is completed, and we're through testing */
- WAIT(INTRFLAGS(bse), HACC, HACC, 0);
- /* now initialize adapter */
+ if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0))
+ return 0;
- debug = 10;
/* Clear interrupts */
- outb(IRST, CONTROL(bse));
-
- debug = 11;
-
- return debug; /* 1 = ok */
-fail:
- return 0; /* 0 = not ok */
-}
+ outb(IRST, CONTROL(sh->io_port));
-/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
-static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
-{
- unsigned long flags;
- struct Scsi_Host *shost = dev_id;
-
- spin_lock_irqsave(shost->host_lock, flags);
- aha1542_intr_handle(shost);
- spin_unlock_irqrestore(shost->host_lock, flags);
- return IRQ_HANDLED;
+ return 1;
}
-/* A "high" level interrupt handler */
-static void aha1542_intr_handle(struct Scsi_Host *shost)
+static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
- void (*my_done) (Scsi_Cmnd *) = NULL;
+ struct Scsi_Host *sh = dev_id;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ void (*my_done)(struct scsi_cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
unsigned long flags;
- Scsi_Cmnd *SCtmp;
+ struct scsi_cmnd *tmp_cmd;
int flag;
- int needs_restart;
- struct mailbox *mb;
- struct ccb *ccb;
-
- mb = HOSTDATA(shost)->mb;
- ccb = HOSTDATA(shost)->ccb;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
#ifdef DEBUG
{
- flag = inb(INTRFLAGS(shost->io_port));
- printk(KERN_DEBUG "aha1542_intr_handle: ");
+ flag = inb(INTRFLAGS(sh->io_port));
+ shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: ");
if (!(flag & ANYINTR))
printk("no interrupt?");
if (flag & MBIF)
@@ -424,14 +260,14 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
printk("HACC ");
if (flag & SCRD)
printk("SCRD ");
- printk("status %02x\n", inb(STATUS(shost->io_port)));
+ printk("status %02x\n", inb(STATUS(sh->io_port)));
};
#endif
number_serviced = 0;
- needs_restart = 0;
- while (1 == 1) {
- flag = inb(INTRFLAGS(shost->io_port));
+ spin_lock_irqsave(sh->host_lock, flags);
+ while (1) {
+ flag = inb(INTRFLAGS(sh->io_port));
/* Check for unusual interrupts. If any of these happen, we should
probably do something special, but for now just printing a message
@@ -442,15 +278,12 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
printk("MBOF ");
if (flag & HACC)
printk("HACC ");
- if (flag & SCRD) {
- needs_restart = 1;
+ if (flag & SCRD)
printk("SCRD ");
- }
}
- aha1542_intr_reset(shost->io_port);
+ aha1542_intr_reset(sh->io_port);
- spin_lock_irqsave(&aha1542_lock, flags);
- mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1;
+ mbi = aha1542->aha1542_last_mbi_used + 1;
if (mbi >= 2 * AHA1542_MAILBOXES)
mbi = AHA1542_MAILBOXES;
@@ -460,57 +293,51 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
mbi++;
if (mbi >= 2 * AHA1542_MAILBOXES)
mbi = AHA1542_MAILBOXES;
- } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used);
+ } while (mbi != aha1542->aha1542_last_mbi_used);
if (mb[mbi].status == 0) {
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ spin_unlock_irqrestore(sh->host_lock, flags);
/* Hmm, no mail. Must have read it the last time around */
- if (!number_serviced && !needs_restart)
- printk(KERN_WARNING "aha1542.c: interrupt received, but no mail.\n");
- /* We detected a reset. Restart all pending commands for
- devices that use the hard reset option */
- if (needs_restart)
- aha1542_restart(shost);
- return;
+ if (!number_serviced)
+ shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
+ return IRQ_HANDLED;
};
- mbo = (scsi2int(mb[mbi].ccbptr) - (SCSI_BUF_PA(&ccb[0]))) / sizeof(struct ccb);
+ mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
mbistatus = mb[mbi].status;
mb[mbi].status = 0;
- HOSTDATA(shost)->aha1542_last_mbi_used = mbi;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbi_used = mbi;
#ifdef DEBUG
- {
- if (ccb[mbo].tarstat | ccb[mbo].hastat)
- printk(KERN_DEBUG "aha1542_command: returning %x (status %d)\n",
- ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
- };
+ if (ccb[mbo].tarstat | ccb[mbo].hastat)
+ shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
#endif
if (mbistatus == 3)
continue; /* Aborted command not found */
#ifdef DEBUG
- printk(KERN_DEBUG "...done %d %d\n", mbo, mbi);
+ shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi);
#endif
- SCtmp = HOSTDATA(shost)->SCint[mbo];
+ tmp_cmd = aha1542->int_cmds[mbo];
- if (!SCtmp || !SCtmp->scsi_done) {
- printk(KERN_WARNING "aha1542_intr_handle: Unexpected interrupt\n");
- printk(KERN_WARNING "tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat,
+ if (!tmp_cmd || !tmp_cmd->scsi_done) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
+ shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
ccb[mbo].hastat, ccb[mbo].idlun, mbo);
- return;
+ return IRQ_HANDLED;
}
- my_done = SCtmp->scsi_done;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
+ my_done = tmp_cmd->scsi_done;
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
we will still have it in the cdb when we come back */
if (ccb[mbo].tarstat == 2)
- memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
SCSI_SENSE_BUFFERSIZE);
@@ -525,166 +352,122 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
#ifdef DEBUG
if (errstatus)
- printk(KERN_DEBUG "(aha1542 error:%x %x %x) ", errstatus,
+ shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus,
ccb[mbo].hastat, ccb[mbo].tarstat);
+ if (ccb[mbo].tarstat == 2)
+ print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12);
+ if (errstatus)
+ printk("aha1542_intr_handle: returning %6x\n", errstatus);
#endif
-
- if (ccb[mbo].tarstat == 2) {
-#ifdef DEBUG
- int i;
-#endif
- DEB(printk("aha1542_intr_handle: sense:"));
-#ifdef DEBUG
- for (i = 0; i < 12; i++)
- printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen + i]);
- printk("\n");
-#endif
- /*
- DEB(printk("aha1542_intr_handle: buf:"));
- for (i = 0; i < bufflen; i++)
- printk("%02x ", ((unchar *)buff)[i]);
- printk("\n");
- */
- }
- DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus));
- SCtmp->result = errstatus;
- HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as
- far as queuecommand is concerned */
- my_done(SCtmp);
+ tmp_cmd->result = errstatus;
+ aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(tmp_cmd);
number_serviced++;
};
}
-static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
- unchar ahacmd = CMD_START_SCSI;
- unchar direction;
- unchar *cmd = (unchar *) SCpnt->cmnd;
- unchar target = SCpnt->device->id;
- unchar lun = SCpnt->device->lun;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 direction;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
unsigned long flags;
- int bufflen = scsi_bufflen(SCpnt);
+ int bufflen = scsi_bufflen(cmd);
int mbo;
- struct mailbox *mb;
- struct ccb *ccb;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
- DEB(int i);
-
- mb = HOSTDATA(SCpnt->device->host)->mb;
- ccb = HOSTDATA(SCpnt->device->host)->ccb;
-
- DEB(if (target > 1) {
- SCpnt->result = DID_TIME_OUT << 16;
- done(SCpnt); return 0;
- }
- );
-
- if (*cmd == REQUEST_SENSE) {
+ if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
-#if 0
- /* scsi_request_sense() provides a buffer of size 256,
- so there is no reason to expect equality */
- if (bufflen != SCSI_SENSE_BUFFERSIZE)
- printk(KERN_CRIT "aha1542: Wrong buffer length supplied "
- "for request sense (%d)\n", bufflen);
-#endif
- SCpnt->result = 0;
- done(SCpnt);
+ cmd->result = 0;
+ cmd->scsi_done(cmd);
return 0;
}
#ifdef DEBUG
- if (*cmd == READ_10 || *cmd == WRITE_10)
- i = xscsi2int(cmd + 2);
- else if (*cmd == READ_6 || *cmd == WRITE_6)
- i = scsi2int(cmd + 2);
- else
- i = -1;
- if (done)
- printk(KERN_DEBUG "aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
- else
- printk(KERN_DEBUG "aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
- aha1542_stat();
- printk(KERN_DEBUG "aha1542_queuecommand: dumping scsi cmd:");
- for (i = 0; i < SCpnt->cmd_len; i++)
- printk("%02x ", cmd[i]);
- printk("\n");
- if (*cmd == WRITE_10 || *cmd == WRITE_6)
- return 0; /* we are still testing, so *don't* write */
+ {
+ int i = -1;
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
+ i = xscsi2int(cmd->cmnd + 2);
+ else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
+ i = scsi2int(cmd->cmnd + 2);
+ shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
+ target, *cmd->cmnd, i, bufflen);
+ print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
+ }
#endif
/* Use the outgoing mailboxes in a round-robin fashion, because this
is how the host adapter will scan for them */
- spin_lock_irqsave(&aha1542_lock, flags);
- mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1;
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
do {
- if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL)
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
break;
mbo++;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
- } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used);
+ } while (mbo != aha1542->aha1542_last_mbo_used);
- if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo])
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
panic("Unable to find empty mailbox for aha1542.\n");
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from
- screwing with this cdb. */
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from
+ screwing with this cdb. */
- HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbo_used = mbo;
#ifdef DEBUG
- printk(KERN_DEBUG "Sending command (%d %x)...", mbo, done);
+ shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif
- any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
memset(&ccb[mbo], 0, sizeof(struct ccb));
- ccb[mbo].cdblen = SCpnt->cmd_len;
+ ccb[mbo].cdblen = cmd->cmd_len;
direction = 0;
- if (*cmd == READ_10 || *cmd == READ_6)
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
direction = 8;
- else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
direction = 16;
- memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
+ memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
if (bufflen) {
struct scatterlist *sg;
struct chain *cptr;
-#ifdef DEBUG
- unsigned char *ptr;
-#endif
- int i, sg_count = scsi_sg_count(SCpnt);
+ int i, sg_count = scsi_sg_count(cmd);
+
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
+ cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
GFP_KERNEL | GFP_DMA);
- cptr = (struct chain *) SCpnt->host_scribble;
+ cptr = (struct chain *) cmd->host_scribble;
if (cptr == NULL) {
/* free the claimed mailbox slot */
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
+ aha1542->int_cmds[mbo] = NULL;
+ spin_unlock_irqrestore(sh->host_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
- scsi_for_each_sg(SCpnt, sg, sg_count, i) {
- any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
+ scsi_for_each_sg(cmd, sg, sg_count, i) {
+ any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
+ + sg->offset);
any2scsi(cptr[i].datalen, sg->length);
};
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
- any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
+ any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
#ifdef DEBUG
- printk("cptr %x: ", cptr);
- ptr = (unsigned char *) cptr;
- for (i = 0; i < 18; i++)
- printk("%02x ", ptr[i]);
+ shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
+ print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
#endif
} else {
ccb[mbo].op = 0; /* SCSI Initiator Command */
- SCpnt->host_scribble = NULL;
+ cmd->host_scribble = NULL;
any2scsi(ccb[mbo].datalen, 0);
any2scsi(ccb[mbo].dataptr, 0);
};
@@ -694,139 +477,116 @@ static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *
ccb[mbo].commlinkid = 0;
#ifdef DEBUG
- {
- int i;
- printk(KERN_DEBUG "aha1542_command: sending.. ");
- for (i = 0; i < sizeof(ccb[mbo]) - 10; i++)
- printk("%02x ", ((unchar *) & ccb[mbo])[i]);
- };
+ print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10);
+ printk("aha1542_queuecommand: now waiting for interrupt ");
#endif
-
- if (done) {
- DEB(printk("aha1542_queuecommand: now waiting for interrupt ");
- aha1542_stat());
- SCpnt->scsi_done = done;
- mb[mbo].status = 1;
- aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1); /* start scsi command */
- DEB(aha1542_stat());
- } else
- printk("aha1542_queuecommand: done can't be NULL\n");
+ mb[mbo].status = 1;
+ aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
return 0;
}
-static DEF_SCSI_QCMD(aha1542_queuecommand)
-
/* Initialize mailboxes */
-static void setup_mailboxes(int bse, struct Scsi_Host *shpnt)
+static void setup_mailboxes(struct Scsi_Host *sh)
{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
int i;
- struct mailbox *mb;
- struct ccb *ccb;
-
- unchar cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
- mb = HOSTDATA(shpnt)->mb;
- ccb = HOSTDATA(shpnt)->ccb;
+ u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
for (i = 0; i < AHA1542_MAILBOXES; i++) {
mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
- any2scsi(mb[i].ccbptr, SCSI_BUF_PA(&ccb[i]));
+ any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
};
- aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
- any2scsi((cmd + 2), SCSI_BUF_PA(mb));
- aha1542_out(bse, cmd, 5);
- WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: failed setting up mailboxes\n");
- }
- aha1542_intr_reset(bse);
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
+ any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+ if (aha1542_out(sh->io_port, mb_cmd, 5))
+ shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
+ aha1542_intr_reset(sh->io_port);
}
-static int __init aha1542_getconfig(int base_io, unsigned char *irq_level, unsigned char *dma_chan, unsigned char *scsi_id)
+static int aha1542_getconfig(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_RETCONF};
- unchar inquiry_result[3];
+ u8 inquiry_result[3];
int i;
- i = inb(STATUS(base_io));
+ i = inb(STATUS(sh->io_port));
if (i & DF) {
- i = inb(DATA(base_io));
+ i = inb(DATA(sh->io_port));
};
- aha1542_out(base_io, inquiry_cmd, 1);
- aha1542_in(base_io, inquiry_result, 3);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: query board settings\n");
- }
- aha1542_intr_reset(base_io);
+ aha1542_outb(sh->io_port, CMD_RETCONF);
+ aha1542_in(sh->io_port, inquiry_result, 3, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying board settings\n");
+ aha1542_intr_reset(sh->io_port);
switch (inquiry_result[0]) {
case 0x80:
- *dma_chan = 7;
+ sh->dma_channel = 7;
break;
case 0x40:
- *dma_chan = 6;
+ sh->dma_channel = 6;
break;
case 0x20:
- *dma_chan = 5;
+ sh->dma_channel = 5;
break;
case 0x01:
- *dma_chan = 0;
+ sh->dma_channel = 0;
break;
case 0:
/* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
- *dma_chan = 0xFF;
+ sh->dma_channel = 0xFF;
break;
default:
- printk(KERN_ERR "Unable to determine Adaptec DMA priority. Disabling board\n");
+ shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
return -1;
};
switch (inquiry_result[1]) {
case 0x40:
- *irq_level = 15;
+ sh->irq = 15;
break;
case 0x20:
- *irq_level = 14;
+ sh->irq = 14;
break;
case 0x8:
- *irq_level = 12;
+ sh->irq = 12;
break;
case 0x4:
- *irq_level = 11;
+ sh->irq = 11;
break;
case 0x2:
- *irq_level = 10;
+ sh->irq = 10;
break;
case 0x1:
- *irq_level = 9;
+ sh->irq = 9;
break;
default:
- printk(KERN_ERR "Unable to determine Adaptec IRQ level. Disabling board\n");
+ shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
return -1;
};
- *scsi_id = inquiry_result[2] & 7;
+ sh->this_id = inquiry_result[2] & 7;
return 0;
}
/* This function should only be called for 1542C boards - we can detect
the special firmware settings and unlock the board */
-static int __init aha1542_mbenable(int base)
+static int aha1542_mbenable(struct Scsi_Host *sh)
{
- static unchar mbenable_cmd[3];
- static unchar mbenable_result[2];
+ static u8 mbenable_cmd[3];
+ static u8 mbenable_result[2];
int retval;
retval = BIOS_TRANSLATION_6432;
- mbenable_cmd[0] = CMD_EXTBIOS;
- aha1542_out(base, mbenable_cmd, 1);
- if (aha1542_in1(base, mbenable_result, 2))
+ aha1542_outb(sh->io_port, CMD_EXTBIOS);
+ if (aha1542_in(sh->io_port, mbenable_result, 2, 100))
return retval;
- WAITd(INTRFLAGS(base), INTRMASK, HACC, 0, 100);
- aha1542_intr_reset(base);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100))
+ goto fail;
+ aha1542_intr_reset(sh->io_port);
if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
mbenable_cmd[0] = CMD_MBENABLE;
@@ -836,37 +596,34 @@ static int __init aha1542_mbenable(int base)
if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03))
retval = BIOS_TRANSLATION_25563;
- aha1542_out(base, mbenable_cmd, 3);
- WAIT(INTRFLAGS(base), INTRMASK, HACC, 0);
+ if (aha1542_out(sh->io_port, mbenable_cmd, 3))
+ goto fail;
};
while (0) {
fail:
- printk(KERN_ERR "aha1542_mbenable: Mailbox init failed\n");
+ shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
}
- aha1542_intr_reset(base);
+ aha1542_intr_reset(sh->io_port);
return retval;
}
/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
-static int __init aha1542_query(int base_io, int *transl)
+static int aha1542_query(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_INQUIRY};
- unchar inquiry_result[4];
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 inquiry_result[4];
int i;
- i = inb(STATUS(base_io));
+ i = inb(STATUS(sh->io_port));
if (i & DF) {
- i = inb(DATA(base_io));
+ i = inb(DATA(sh->io_port));
};
- aha1542_out(base_io, inquiry_cmd, 1);
- aha1542_in(base_io, inquiry_result, 4);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: query card type\n");
- }
- aha1542_intr_reset(base_io);
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
+ aha1542_in(sh->io_port, inquiry_result, 4, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying card type\n");
+ aha1542_intr_reset(sh->io_port);
- *transl = BIOS_TRANSLATION_6432; /* Default case */
+ aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */
/* For an AHA1740 series board, we ignore the board since there is a
hardware bug which can lead to wrong blocks being returned if the board
@@ -875,391 +632,198 @@ fail:
*/
if (inquiry_result[0] == 0x43) {
- printk(KERN_INFO "aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
+ shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
return 1;
};
/* Always call this - boards that do not support extended bios translation
will ignore the command, and we will set the proper default */
- *transl = aha1542_mbenable(base_io);
+ aha1542->bios_translation = aha1542_mbenable(sh);
return 0;
}
-#ifndef MODULE
-static char *setup_str[MAXBOARDS] __initdata;
-static int setup_idx = 0;
-
-static void __init aha1542_setup(char *str, int *ints)
+static u8 dma_speed_hw(int dma_speed)
{
- const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n";
- int setup_portbase;
-
- if (setup_idx >= MAXBOARDS) {
- printk(KERN_ERR "aha1542: aha1542_setup called too many times! Bad LILO params ?\n");
- printk(KERN_ERR " Entryline 1: %s\n", setup_str[0]);
- printk(KERN_ERR " Entryline 2: %s\n", setup_str[1]);
- printk(KERN_ERR " This line: %s\n", str);
- return;
- }
- if (ints[0] < 1 || ints[0] > 4) {
- printk(KERN_ERR "aha1542: %s\n", str);
- printk(ahausage);
- printk(KERN_ERR "aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
- }
- setup_called[setup_idx] = ints[0];
- setup_str[setup_idx] = str;
-
- setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */
- setup_buson[setup_idx] = ints[0] >= 2 ? ints[2] : 7;
- setup_busoff[setup_idx] = ints[0] >= 3 ? ints[3] : 5;
- if (ints[0] >= 4)
- {
- int atbt = -1;
- switch (ints[4]) {
- case 5:
- atbt = 0x00;
- break;
- case 6:
- atbt = 0x04;
- break;
- case 7:
- atbt = 0x01;
- break;
- case 8:
- atbt = 0x02;
- break;
- case 10:
- atbt = 0x03;
- break;
- default:
- printk(KERN_ERR "aha1542: %s\n", str);
- printk(ahausage);
- printk(KERN_ERR "aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
- break;
- }
- setup_dmaspeed[setup_idx] = atbt;
+ switch (dma_speed) {
+ case 5:
+ return 0x00;
+ case 6:
+ return 0x04;
+ case 7:
+ return 0x01;
+ case 8:
+ return 0x02;
+ case 10:
+ return 0x03;
}
- if (setup_portbase != 0)
- bases[setup_idx] = setup_portbase;
- ++setup_idx;
+ return 0xff; /* invalid */
}
-static int __init do_setup(char *str)
+/* Set the Bus on/off-times so as not to ruin floppy performance */
+static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed)
{
- int ints[5];
+ if (bus_on > 0) {
+ u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) };
- int count=setup_idx;
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, oncmd, 2))
+ goto fail;
+ }
- get_options(str, ARRAY_SIZE(ints), ints);
- aha1542_setup(str,ints);
+ if (bus_off > 0) {
+ u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) };
- return count<setup_idx;
-}
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, offcmd, 2))
+ goto fail;
+ }
-__setup("aha1542=",do_setup);
-#endif
+ if (dma_speed_hw(dma_speed) != 0xff) {
+ u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) };
+
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, dmacmd, 2))
+ goto fail;
+ }
+ aha1542_intr_reset(sh->io_port);
+ return;
+fail:
+ shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n");
+ aha1542_intr_reset(sh->io_port);
+}
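
As a concrete illustration of the module-parameter handling above: dma_speed_hw() encodes the MB/s value into the byte sent with CMD_DMASPEED, and any value outside {5, 6, 7, 8, 10} falls through to 0xff, so aha1542_set_bus_times() skips the command and leaves the jumper-selected default in place. The values below are examples only:

/* Illustrative values only: encoding of the dma_speed module parameter. */
u8 code;

code = dma_speed_hw(8);	/* 0x02 - issued as { CMD_DMASPEED, 0x02 } */
code = dma_speed_hw(9);	/* 0xff - invalid, CMD_DMASPEED is not issued */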
/* return non-zero on detection */
-static int __init aha1542_detect(struct scsi_host_template * tpnt)
+static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx)
{
- unsigned char dma_chan;
- unsigned char irq_level;
- unsigned char scsi_id;
- unsigned long flags;
- unsigned int base_io;
- int trans;
- struct Scsi_Host *shpnt = NULL;
- int count = 0;
- int indx;
-
- DEB(printk("aha1542_detect: \n"));
-
- tpnt->proc_name = "aha1542";
-
-#ifdef MODULE
- bases[0] = aha1542[0];
- setup_buson[0] = aha1542[1];
- setup_busoff[0] = aha1542[2];
- {
- int atbt = -1;
- switch (aha1542[3]) {
- case 5:
- atbt = 0x00;
- break;
- case 6:
- atbt = 0x04;
- break;
- case 7:
- atbt = 0x01;
- break;
- case 8:
- atbt = 0x02;
- break;
- case 10:
- atbt = 0x03;
- break;
- };
- setup_dmaspeed[0] = atbt;
+ unsigned int base_io = io[indx];
+ struct Scsi_Host *sh;
+ struct aha1542_hostdata *aha1542;
+ char dma_info[] = "no DMA";
+
+ if (base_io == 0)
+ return NULL;
+
+ if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542"))
+ return NULL;
+
+ sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata));
+ if (!sh)
+ goto release;
+ aha1542 = shost_priv(sh);
+
+ sh->unique_id = base_io;
+ sh->io_port = base_io;
+ sh->n_io_port = AHA1542_REGION_SIZE;
+ aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1;
+ aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1;
+
+ if (!aha1542_test_port(sh))
+ goto unregister;
+
+ aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]);
+ if (aha1542_query(sh))
+ goto unregister;
+ if (aha1542_getconfig(sh) == -1)
+ goto unregister;
+
+ if (sh->dma_channel != 0xFF)
+ snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel);
+ shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n",
+ sh->this_id, base_io, sh->irq, dma_info);
+ if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
+ shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
+
+ setup_mailboxes(sh);
+
+ if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
+ goto unregister;
}
-#endif
-
- /*
- * Hunt for ISA Plug'n'Pray Adaptecs (AHA1535)
- */
-
- if(isapnp)
- {
- struct pnp_dev *pdev = NULL;
- for(indx = 0; indx < ARRAY_SIZE(bases); indx++) {
- if(bases[indx])
- continue;
- pdev = pnp_find_dev(NULL, ISAPNP_VENDOR('A', 'D', 'P'),
- ISAPNP_FUNCTION(0x1542), pdev);
- if(pdev==NULL)
- break;
- /*
- * Activate the PnP card
- */
-
- if(pnp_device_attach(pdev)<0)
- continue;
-
- if(pnp_activate_dev(pdev)<0) {
- pnp_device_detach(pdev);
- continue;
- }
-
- if(!pnp_port_valid(pdev, 0)) {
- pnp_device_detach(pdev);
- continue;
- }
-
- bases[indx] = pnp_port_start(pdev, 0);
-
- /* The card can be queried for its DMA, we have
- the DMA set up that is enough */
-
- printk(KERN_INFO "ISAPnP found an AHA1535 at I/O 0x%03X\n", bases[indx]);
+ if (sh->dma_channel != 0xFF) {
+ if (request_dma(sh->dma_channel, "aha1542")) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n");
+ goto free_irq;
+ }
+ if (sh->dma_channel == 0 || sh->dma_channel >= 5) {
+ set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE);
+ enable_dma(sh->dma_channel);
}
}
- for (indx = 0; indx < ARRAY_SIZE(bases); indx++)
- if (bases[indx] != 0 && request_region(bases[indx], 4, "aha1542")) {
- shpnt = scsi_register(tpnt,
- sizeof(struct aha1542_hostdata));
-
- if(shpnt==NULL) {
- release_region(bases[indx], 4);
- continue;
- }
- if (!aha1542_test_port(bases[indx], shpnt))
- goto unregister;
-
- base_io = bases[indx];
-
- /* Set the Bus on/off-times as not to ruin floppy performance */
- {
- unchar oncmd[] = {CMD_BUSON_TIME, 7};
- unchar offcmd[] = {CMD_BUSOFF_TIME, 5};
-
- if (setup_called[indx]) {
- oncmd[1] = setup_buson[indx];
- offcmd[1] = setup_busoff[indx];
- }
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, oncmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, offcmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- if (setup_dmaspeed[indx] >= 0) {
- unchar dmacmd[] = {CMD_DMASPEED, 0};
- dmacmd[1] = setup_dmaspeed[indx];
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, dmacmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- }
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: setting bus on/off-time failed\n");
- }
- aha1542_intr_reset(base_io);
- }
- if (aha1542_query(base_io, &trans))
- goto unregister;
-
- if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1)
- goto unregister;
-
- printk(KERN_INFO "Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level);
- if (dma_chan != 0xFF)
- printk(", DMA priority %d", dma_chan);
- printk("\n");
-
- DEB(aha1542_stat());
- setup_mailboxes(base_io, shpnt);
-
- DEB(aha1542_stat());
-
- DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
- spin_lock_irqsave(&aha1542_lock, flags);
- if (request_irq(irq_level, do_aha1542_intr_handle, 0,
- "aha1542", shpnt)) {
- printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
- spin_unlock_irqrestore(&aha1542_lock, flags);
- goto unregister;
- }
- if (dma_chan != 0xFF) {
- if (request_dma(dma_chan, "aha1542")) {
- printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
- free_irq(irq_level, shpnt);
- spin_unlock_irqrestore(&aha1542_lock, flags);
- goto unregister;
- }
- if (dma_chan == 0 || dma_chan >= 5) {
- set_dma_mode(dma_chan, DMA_MODE_CASCADE);
- enable_dma(dma_chan);
- }
- }
-
- shpnt->this_id = scsi_id;
- shpnt->unique_id = base_io;
- shpnt->io_port = base_io;
- shpnt->n_io_port = 4; /* Number of bytes of I/O space used */
- shpnt->dma_channel = dma_chan;
- shpnt->irq = irq_level;
- HOSTDATA(shpnt)->bios_translation = trans;
- if (trans == BIOS_TRANSLATION_25563)
- printk(KERN_INFO "aha1542.c: Using extended bios translation\n");
- HOSTDATA(shpnt)->aha1542_last_mbi_used = (2 * AHA1542_MAILBOXES - 1);
- HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1);
- memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint));
- spin_unlock_irqrestore(&aha1542_lock, flags);
-#if 0
- DEB(printk(" *** READ CAPACITY ***\n"));
-
- {
- unchar buf[8];
- static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int i;
-
- for (i = 0; i < sizeof(buf); ++i)
- buf[i] = 0x87;
- for (i = 0; i < 2; ++i)
- if (!aha1542_command(i, cmd, buf, sizeof(buf))) {
- printk(KERN_DEBUG "aha_detect: LU %d sector_size %d device_size %d\n",
- i, xscsi2int(buf + 4), xscsi2int(buf));
- }
- }
- DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n"));
+ if (scsi_add_host(sh, pdev))
+ goto free_dma;
- for (i = 0; i < 4; ++i) {
- unsigned char cmd[10];
- static buffer[512];
+ scsi_scan_host(sh);
- cmd[0] = READ_10;
- cmd[1] = 0;
- xany2scsi(cmd + 2, i);
- cmd[6] = 0;
- cmd[7] = 0;
- cmd[8] = 1;
- cmd[9] = 0;
- aha1542_command(0, cmd, buffer, 512);
- }
-#endif
- count++;
- continue;
+ return sh;
+free_dma:
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+free_irq:
+ free_irq(sh->irq, sh);
unregister:
- release_region(bases[indx], 4);
- scsi_unregister(shpnt);
- continue;
+ scsi_host_put(sh);
+release:
+ release_region(base_io, AHA1542_REGION_SIZE);
- };
-
- return count;
+ return NULL;
}
-static int aha1542_release(struct Scsi_Host *shost)
+static int aha1542_release(struct Scsi_Host *sh)
{
- if (shost->irq)
- free_irq(shost->irq, shost);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
+ scsi_remove_host(sh);
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+ if (sh->irq)
+ free_irq(sh->irq, sh);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ scsi_host_put(sh);
return 0;
}
-static int aha1542_restart(struct Scsi_Host *shost)
-{
- int i;
- int count = 0;
-#if 0
- unchar ahacmd = CMD_START_SCSI;
-#endif
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(shost)->SCint[i] &&
- !(HOSTDATA(shost)->SCint[i]->device->soft_reset)) {
-#if 0
- HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */
-#endif
- count++;
- }
- printk(KERN_DEBUG "Potential to restart %d stalled commands...\n", count);
-#if 0
- /* start scsi command */
- if (count)
- aha1542_out(shost->io_port, &ahacmd, 1);
-#endif
- return 0;
-}
/*
* This is a device reset. This is handled by sending a special command
* to the device.
*/
-static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_dev_reset(struct scsi_cmnd *cmd)
{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
unsigned long flags;
- struct mailbox *mb;
- unchar target = SCpnt->device->id;
- unchar lun = SCpnt->device->lun;
+ struct mailbox *mb = aha1542->mb;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
int mbo;
- struct ccb *ccb;
- unchar ahacmd = CMD_START_SCSI;
-
- ccb = HOSTDATA(SCpnt->device->host)->ccb;
- mb = HOSTDATA(SCpnt->device->host)->mb;
+ struct ccb *ccb = aha1542->ccb;
- spin_lock_irqsave(&aha1542_lock, flags);
- mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1;
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
do {
- if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL)
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
break;
mbo++;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
- } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used);
+ } while (mbo != aha1542->aha1542_last_mbo_used);
- if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo])
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
panic("Unable to find empty mailbox for aha1542.\n");
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively
- prevent someone else from
- screwing with this cdb. */
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively
+ prevent someone else from
+ screwing with this cdb. */
- HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbo_used = mbo;
- any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
memset(&ccb[mbo], 0, sizeof(struct ccb));
@@ -1274,141 +838,43 @@ static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
* Now tell the 1542 to flush all pending commands for this
* target
*/
- aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1);
+ aha1542_outb(sh->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
- scmd_printk(KERN_WARNING, SCpnt,
+ scmd_printk(KERN_WARNING, cmd,
"Trying device reset for target\n");
return SUCCESS;
-
-
-#ifdef ERIC_neverdef
- /*
- * With the 1542 we apparently never get an interrupt to
- * acknowledge a device reset being sent. Then again, Leonard
- * says we are doing this wrong in the first place...
- *
- * Take a wait and see attitude. If we get spurious interrupts,
- * then the device reset is doing something sane and useful, and
- * we will wait for the interrupt to post completion.
- */
- printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
-
- /*
- * Free the command block for all commands running on this
- * target...
- */
- for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->host)->SCint[i] &&
- HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- }
- return SUCCESS;
-
- return FAILED;
-#endif /* ERIC_neverdef */
}
-static int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ unsigned long flags;
int i;
+ spin_lock_irqsave(sh->host_lock, flags);
/*
* This does a scsi reset for all devices on the bus.
* In principle, we could also reset the 1542 - should
* we do this? Try this first, and we can add that later
* if it turns out to be useful.
*/
- outb(SCRST, CONTROL(SCpnt->device->host->io_port));
+ outb(reset_cmd, CONTROL(cmd->device->host->io_port));
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- ssleep(4);
-
- spin_lock_irq(SCpnt->device->host->host_lock);
-
- WAIT(STATUS(SCpnt->device->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
- /*
- * Now try to pick up the pieces. For all pending commands,
- * free any internal data structures, and basically clear things
- * out. We do not try and restart any commands or anything -
- * the strategy handler takes care of that crap.
- */
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no);
-
- for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i];
-
-
- if (SCtmp->device->soft_reset) {
- /*
- * If this device implements the soft reset option,
- * then it is still holding onto the command, and
- * may yet complete it. In this case, we don't
- * flush the data.
- */
- continue;
- }
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->device->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->device->host)->mb[i].status = 0;
- }
+ if (!wait_mask(STATUS(cmd->device->host->io_port),
+ STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return FAILED;
}
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return SUCCESS;
-
-fail:
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return FAILED;
-}
-
-static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
-{
- int i;
-
- /*
- * This does a scsi reset for all devices on the bus.
- * In principle, we could also reset the 1542 - should
- * we do this? Try this first, and we can add that later
- * if it turns out to be useful.
- */
- outb(HRST | SCRST, CONTROL(SCpnt->device->host->io_port));
-
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- ssleep(4);
- spin_lock_irq(SCpnt->device->host->host_lock);
-
- WAIT(STATUS(SCpnt->device->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
/*
* We need to do this too before the 1542 can interact with
- * us again.
+ * us again after host reset.
*/
- setup_mailboxes(SCpnt->device->host->io_port, SCpnt->device->host);
+ if (reset_cmd & HRST)
+ setup_mailboxes(cmd->device->host);
/*
* Now try to pick up the pieces. For all pending commands,
@@ -1416,14 +882,14 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
* out. We do not try and restart any commands or anything -
* the strategy handler takes care of that crap.
*/
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no);
+ shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no);
for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i];
+ if (aha1542->int_cmds[i] != NULL) {
+ struct scsi_cmnd *tmp_cmd;
+ tmp_cmd = aha1542->int_cmds[i];
- if (SCtmp->device->soft_reset) {
+ if (tmp_cmd->device->soft_reset) {
/*
* If this device implements the soft reset option,
* then it is still holding onto the command, and
@@ -1432,241 +898,51 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
*/
continue;
}
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->device->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->device->host)->mb[i].status = 0;
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
+ aha1542->int_cmds[i] = NULL;
+ aha1542->mb[i].status = 0;
}
}
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_unlock_irqrestore(sh->host_lock, flags);
return SUCCESS;
-
-fail:
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return FAILED;
}
-#if 0
-/*
- * These are the old error handling routines. They are only temporarily
- * here while we play with the new error handling code.
- */
-static int aha1542_old_abort(Scsi_Cmnd * SCpnt)
+static int aha1542_bus_reset(struct scsi_cmnd *cmd)
{
-#if 0
- unchar ahacmd = CMD_START_SCSI;
- unsigned long flags;
- struct mailbox *mb;
- int mbi, mbo, i;
-
- printk(KERN_DEBUG "In aha1542_abort: %x %x\n",
- inb(STATUS(SCpnt->host->io_port)),
- inb(INTRFLAGS(SCpnt->host->io_port)));
-
- spin_lock_irqsave(&aha1542_lock, flags);
- mb = HOSTDATA(SCpnt->host)->mb;
- mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1;
- if (mbi >= 2 * AHA1542_MAILBOXES)
- mbi = AHA1542_MAILBOXES;
-
- do {
- if (mb[mbi].status != 0)
- break;
- mbi++;
- if (mbi >= 2 * AHA1542_MAILBOXES)
- mbi = AHA1542_MAILBOXES;
- } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used);
- spin_unlock_irqrestore(&aha1542_lock, flags);
-
- if (mb[mbi].status) {
- printk(KERN_ERR "Lost interrupt discovered on irq %d - attempting to recover\n",
- SCpnt->host->irq);
- aha1542_intr_handle(SCpnt->host, NULL);
- return 0;
- }
- /* OK, no lost interrupt. Try looking to see how many pending commands
- we think we have. */
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i]) {
- if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
- printk(KERN_ERR "Timed out command pending for %s\n",
- SCpnt->request->rq_disk ?
- SCpnt->request->rq_disk->disk_name : "?"
- );
- if (HOSTDATA(SCpnt->host)->mb[i].status) {
- printk(KERN_ERR "OGMB still full - restarting\n");
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
- };
- } else
- printk(KERN_ERR "Other pending command %s\n",
- SCpnt->request->rq_disk ?
- SCpnt->request->rq_disk->disk_name : "?"
- );
- }
-#endif
-
- DEB(printk("aha1542_abort\n"));
-#if 0
- spin_lock_irqsave(&aha1542_lock, flags);
- for (mbo = 0; mbo < AHA1542_MAILBOXES; mbo++) {
- if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]) {
- mb[mbo].status = 2; /* Abort command */
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
- spin_unlock_irqrestore(&aha1542_lock, flags);
- break;
- }
- }
- if (AHA1542_MAILBOXES == mbo)
- spin_unlock_irqrestore(&aha1542_lock, flags);
-#endif
- return SCSI_ABORT_SNOOZE;
+ return aha1542_reset(cmd, SCRST);
}
-/* We do not implement a reset function here, but the upper level code
- assumes that it will get some kind of response for the command in
- SCpnt. We must oblige, or the command will hang the scsi system.
- For a first go, we assume that the 1542 notifies us with all of the
- pending commands (it does implement soft reset, after all). */
-
-static int aha1542_old_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+static int aha1542_host_reset(struct scsi_cmnd *cmd)
{
- unchar ahacmd = CMD_START_SCSI;
- int i;
-
- /*
- * See if a bus reset was suggested.
- */
- if (reset_flags & SCSI_RESET_SUGGEST_BUS_RESET) {
- /*
- * This does a scsi reset for all devices on the bus.
- * In principle, we could also reset the 1542 - should
- * we do this? Try this first, and we can add that later
- * if it turns out to be useful.
- */
- outb(HRST | SCRST, CONTROL(SCpnt->host->io_port));
-
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- WAIT(STATUS(SCpnt->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
- /*
- * We need to do this too before the 1542 can interact with
- * us again.
- */
- setup_mailboxes(SCpnt->host->io_port, SCpnt->host);
-
- /*
- * Now try to pick up the pieces. Restart all commands
- * that are currently active on the bus, and reset all of
- * the datastructures. We have some time to kill while
- * things settle down, so print a nice message.
- */
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no);
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- SCtmp->result = DID_RESET << 16;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target);
- SCtmp->scsi_done(SCpnt);
-
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- /*
- * Now tell the mid-level code what we did here. Since
- * we have restarted all of the outstanding commands,
- * then report SUCCESS.
- */
- return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);
-fail:
- printk(KERN_CRIT "aha1542.c: Unable to perform hard reset.\n");
- printk(KERN_CRIT "Power cycle machine to reset\n");
- return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET);
-
-
- } else {
- /* This does a selective reset of just the one device */
- /* First locate the ccb for this command */
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
- HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */
- /* Now tell the 1542 to flush all pending commands for this target */
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
-
- /* Here is the tricky part. What to do next. Do we get an interrupt
- for the commands that we aborted with the specified target, or
- do we generate this on our own? Try it without first and see
- what happens */
- printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
-
- /* If the first does not work, then try the second. I think the
- first option is more likely to be correct. Free the command
- block for all commands running on this target... */
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] &&
- HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- SCtmp->result = DID_RESET << 16;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target);
- SCtmp->scsi_done(SCpnt);
-
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- return SCSI_RESET_SUCCESS;
- }
- }
- /* No active command at this time, so this means that each time we got
- some kind of response the last time through. Tell the mid-level code
- to request sense information in order to decide what to do next. */
- return SCSI_RESET_PUNT;
+ return aha1542_reset(cmd, HRST | SCRST);
}
-#endif /* end of big comment block around old_abort + old_reset */
static int aha1542_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *ip)
+ struct block_device *bdev, sector_t capacity, int geom[])
{
- int translation_algorithm;
- int size = capacity;
-
- translation_algorithm = HOSTDATA(sdev->host)->bios_translation;
+ struct aha1542_hostdata *aha1542 = shost_priv(sdev->host);
- if ((size >> 11) > 1024 && translation_algorithm == BIOS_TRANSLATION_25563) {
+ if (capacity >= 0x200000 &&
+ aha1542->bios_translation == BIOS_TRANSLATION_25563) {
/* Please verify that this is the same as what DOS returns */
- ip[0] = 255;
- ip[1] = 63;
- ip[2] = size / 255 / 63;
+ geom[0] = 255; /* heads */
+ geom[1] = 63; /* sectors */
} else {
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = size >> 11;
+ geom[0] = 64; /* heads */
+ geom[1] = 32; /* sectors */
}
+ geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
return 0;
}
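
For context: the rewritten biosparam selects the extended 255-head/63-sector translation for drives of 0x200000 sectors (1 GiB) and larger when the BIOS reports it, and 64/32 otherwise, then derives the cylinder count from the capacity. Below is a small userspace sketch of the same arithmetic, with plain division standing in for the kernel's sector_div() (which divides its sector_t argument in place and returns the remainder); it is an illustration, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Illustration only, not the driver's code. */
static void chs_from_capacity(uint64_t capacity, int geom[3], int extended)
{
	if (capacity >= 0x200000 && extended) {	/* >= 1 GiB, 255/63 BIOS translation */
		geom[0] = 255;			/* heads */
		geom[1] = 63;			/* sectors per track */
	} else {
		geom[0] = 64;
		geom[1] = 32;
	}
	geom[2] = capacity / (geom[0] * geom[1]);	/* cylinders */
}

int main(void)
{
	int geom[3];

	chs_from_capacity(4194304, geom, 1);	/* 2 GiB disk, extended translation */
	printf("C/H/S = %d/%d/%d\n", geom[2], geom[0], geom[1]);
	return 0;
}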
MODULE_LICENSE("GPL");
-
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.proc_name = "aha1542",
.name = "Adaptec 1542",
- .detect = aha1542_detect,
- .release = aha1542_release,
.queuecommand = aha1542_queuecommand,
.eh_device_reset_handler= aha1542_dev_reset,
.eh_bus_reset_handler = aha1542_bus_reset,
@@ -1674,9 +950,124 @@ static struct scsi_host_template driver_template = {
.bios_param = aha1542_biosparam,
.can_queue = AHA1542_MAILBOXES,
.this_id = 7,
- .sg_tablesize = AHA1542_SCATTER,
- .cmd_per_lun = AHA1542_CMDLUN,
+ .sg_tablesize = 16,
+ .cmd_per_lun = 1,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
};
-#include "scsi_module.c"
+
+static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
+{
+ struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev);
+
+ if (!sh)
+ return 0;
+
+ dev_set_drvdata(pdev, sh);
+ return 1;
+}
+
+static int aha1542_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ aha1542_release(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver aha1542_isa_driver = {
+ .match = aha1542_isa_match,
+ .remove = aha1542_isa_remove,
+ .driver = {
+ .name = "aha1542"
+ },
+};
+static int isa_registered;
+
+#ifdef CONFIG_PNP
+static struct pnp_device_id aha1542_pnp_ids[] = {
+ { .id = "ADP1542" },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids);
+
+static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
+{
+ int indx;
+ struct Scsi_Host *sh;
+
+ for (indx = 0; indx < ARRAY_SIZE(io); indx++) {
+ if (io[indx])
+ continue;
+
+ if (pnp_activate_dev(pdev) < 0)
+ continue;
+
+ io[indx] = pnp_port_start(pdev, 0);
+
+		/* The card can be queried for its DMA channel, but
+		   the DMA is already set up, which is enough */
+
+ dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]);
+ }
+
+ sh = aha1542_hw_init(&driver_template, &pdev->dev, indx);
+ if (!sh)
+ return -ENODEV;
+
+ pnp_set_drvdata(pdev, sh);
+ return 0;
+}
+
+static void aha1542_pnp_remove(struct pnp_dev *pdev)
+{
+ aha1542_release(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver aha1542_pnp_driver = {
+ .name = "aha1542",
+ .id_table = aha1542_pnp_ids,
+ .probe = aha1542_pnp_probe,
+ .remove = aha1542_pnp_remove,
+};
+static int pnp_registered;
+#endif /* CONFIG_PNP */
+
+static int __init aha1542_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_PNP
+ if (isapnp) {
+ ret = pnp_register_driver(&aha1542_pnp_driver);
+ if (!ret)
+ pnp_registered = 1;
+ }
+#endif
+ ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS);
+ if (!ret)
+ isa_registered = 1;
+
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ ret = 0;
+#endif
+ if (isa_registered)
+ ret = 0;
+
+ return ret;
+}
+
+static void __exit aha1542_exit(void)
+{
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ pnp_unregister_driver(&aha1542_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&aha1542_isa_driver);
+}
+
+module_init(aha1542_init);
+module_exit(aha1542_exit);
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index b871d2b57f93..0fe9bae1b3d1 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -1,64 +1,35 @@
-#ifndef _AHA1542_H
-
-/* $Id: aha1542.h,v 1.1 1992/07/24 06:27:38 root Exp root $
- *
- * Header file for the adaptec 1542 driver for Linux
- *
- * $Log: aha1542.h,v $
- * Revision 1.1 1992/07/24 06:27:38 root
- * Initial revision
- *
- * Revision 1.2 1992/07/04 18:41:49 root
- * Replaced distribution with current drivers
- *
- * Revision 1.3 1992/06/23 23:58:20 root
- * Fixes.
- *
- * Revision 1.2 1992/05/26 22:13:23 root
- * Changed bug that prevented DMA above first 2 mbytes.
- *
- * Revision 1.1 1992/05/22 21:00:29 root
- * Initial revision
- *
- * Revision 1.1 1992/04/24 18:01:50 root
- * Initial revision
- *
- * Revision 1.1 1992/04/02 03:23:13 drew
- * Initial revision
- *
- * Revision 1.3 1992/01/27 14:46:29 tthorn
- * *** empty log message ***
- *
- */
+#ifndef _AHA1542_H_
+#define _AHA1542_H_
#include <linux/types.h>
/* I/O Port interface 4.2 */
/* READ */
#define STATUS(base) base
-#define STST 0x80 /* Self Test in Progress */
-#define DIAGF 0x40 /* Internal Diagnostic Failure */
-#define INIT 0x20 /* Mailbox Initialization Required */
-#define IDLE 0x10 /* SCSI Host Adapter Idle */
-#define CDF 0x08 /* Command/Data Out Port Full */
-#define DF 0x04 /* Data In Port Full */
-#define INVDCMD 0x01 /* Invalid H A Command */
-#define STATMASK 0xfd /* 0x02 is reserved */
+#define STST BIT(7) /* Self Test in Progress */
+#define DIAGF BIT(6) /* Internal Diagnostic Failure */
+#define INIT BIT(5) /* Mailbox Initialization Required */
+#define IDLE BIT(4) /* SCSI Host Adapter Idle */
+#define CDF BIT(3) /* Command/Data Out Port Full */
+#define DF BIT(2) /* Data In Port Full */
+/* BIT(1) is reserved */
+#define INVDCMD BIT(0) /* Invalid H A Command */
+#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD)
#define INTRFLAGS(base) (STATUS(base)+2)
-#define ANYINTR 0x80 /* Any Interrupt */
-#define SCRD 0x08 /* SCSI Reset Detected */
-#define HACC 0x04 /* HA Command Complete */
-#define MBOA 0x02 /* MBO Empty */
-#define MBIF 0x01 /* MBI Full */
-#define INTRMASK 0x8f
+#define ANYINTR BIT(7) /* Any Interrupt */
+#define SCRD BIT(3) /* SCSI Reset Detected */
+#define HACC BIT(2) /* HA Command Complete */
+#define MBOA BIT(1) /* MBO Empty */
+#define MBIF BIT(0) /* MBI Full */
+#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF)
/* WRITE */
#define CONTROL(base) STATUS(base)
-#define HRST 0x80 /* Hard Reset */
-#define SRST 0x40 /* Soft Reset */
-#define IRST 0x20 /* Interrupt Reset */
-#define SCRST 0x10 /* SCSI Bus Reset */
+#define HRST BIT(7) /* Hard Reset */
+#define SRST BIT(6) /* Soft Reset */
+#define IRST BIT(5) /* Interrupt Reset */
+#define SCRST BIT(4) /* SCSI Bus Reset */
/* READ/WRITE */
#define DATA(base) (STATUS(base)+1)
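
For reference, the STATMASK and IDLE bits defined above are exactly what the reworked reset path polls for via wait_mask() earlier in this diff. wait_mask() itself lives in aha1542.c and is not shown in this hunk, so the helper below is a hypothetical sketch in the same spirit, not the driver's implementation.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: poll STATUS(base) until all "allof" bits are set and no
 * "noneof" bits are, restricted to the valid STATMASK bits. */
static bool poll_status(int base, u8 allof, u8 noneof, unsigned int tries)
{
	while (tries--) {
		u8 st = inb(STATUS(base)) & STATMASK;

		if ((st & allof) == allof && !(st & noneof))
			return true;
		udelay(10);
	}
	return false;
}

/* e.g. wait for the adapter to go idle after a reset:
 *	poll_status(sh->io_port, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 100000);
 */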
@@ -80,14 +51,14 @@
/* Mailbox Definition 5.2.1 and 5.2.2 */
struct mailbox {
- unchar status; /* Command/Status */
- unchar ccbptr[3]; /* msb, .., lsb */
+ u8 status; /* Command/Status */
+ u8 ccbptr[3]; /* msb, .., lsb */
};
/* This is used with scatter-gather */
struct chain {
- unchar datalen[3]; /* Size of this part of chain */
- unchar dataptr[3]; /* Location of data */
+ u8 datalen[3]; /* Size of this part of chain */
+ u8 dataptr[3]; /* Location of data */
};
/* These belong in scsi.h also */
@@ -100,51 +71,32 @@ static inline void any2scsi(u8 *p, u32 v)
#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
-#define xany2scsi(up, p) \
-(up)[0] = ((long)(p)) >> 24; \
-(up)[1] = ((long)(p)) >> 16; \
-(up)[2] = ((long)(p)) >> 8; \
-(up)[3] = ((long)(p));
-
#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ (((long)(up)[2]) << 8) + ((long)(up)[3]) )
#define MAX_CDB 12
#define MAX_SENSE 14
-struct ccb { /* Command Control Block 5.3 */
- unchar op; /* Command Control Block Operation Code */
- unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
- /* Outbound data transfer, length is checked*/
- /* Inbound data transfer, length is checked */
- /* Logical Unit Number */
- unchar cdblen; /* SCSI Command Length */
- unchar rsalen; /* Request Sense Allocation Length/Disable */
- unchar datalen[3]; /* Data Length (msb, .., lsb) */
- unchar dataptr[3]; /* Data Pointer */
- unchar linkptr[3]; /* Link Pointer */
- unchar commlinkid; /* Command Linking Identifier */
- unchar hastat; /* Host Adapter Status (HASTAT) */
- unchar tarstat; /* Target Device Status */
- unchar reserved[2];
- unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
- /* REQUEST SENSE */
+struct ccb { /* Command Control Block 5.3 */
+ u8 op; /* Command Control Block Operation Code */
+ u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ u8 cdblen; /* SCSI Command Length */
+ u8 rsalen; /* Request Sense Allocation Length/Disable */
+ u8 datalen[3]; /* Data Length (msb, .., lsb) */
+ u8 dataptr[3]; /* Data Pointer */
+ u8 linkptr[3]; /* Link Pointer */
+ u8 commlinkid; /* Command Linking Identifier */
+ u8 hastat; /* Host Adapter Status (HASTAT) */
+ u8 tarstat; /* Target Device Status */
+ u8 reserved[2];
+ u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
};
-static int aha1542_detect(struct scsi_host_template *);
-static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
-static int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
-static int aha1542_host_reset(Scsi_Cmnd * SCpnt);
-#if 0
-static int aha1542_old_abort(Scsi_Cmnd * SCpnt);
-static int aha1542_old_reset(Scsi_Cmnd *, unsigned int);
-#endif
-static int aha1542_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int *);
-
+#define AHA1542_REGION_SIZE 4
#define AHA1542_MAILBOXES 8
-#define AHA1542_SCATTER 16
-#define AHA1542_CMDLUN 1
-#endif
+#endif /* _AHA1542_H_ */
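
The 3-byte fields in the mailbox and CCB structures above (ccbptr[3], datalen[3], dataptr[3]) carry 24-bit big-endian values, which is what the driver's any2scsi()/scsi2int() helpers encode and decode. A standalone sketch of that encoding, for illustration only:

#include <stdint.h>
#include <stdio.h>

static void put24(uint8_t p[3], uint32_t v)
{
	p[0] = v >> 16;		/* msb first */
	p[1] = v >> 8;
	p[2] = v;		/* lsb last */
}

static uint32_t get24(const uint8_t p[3])
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t buf[3];

	put24(buf, 0x123456);
	printf("0x%06x\n", get24(buf));	/* prints 0x123456 */
	return 0;
}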
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 97f2accd3dbb..109e2c99e6c1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -10437,14 +10437,13 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
return;
}
}
- lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
- memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index d5c7b193d8d3..ce96a0be3282 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1326,10 +1326,9 @@ int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
ahd->platform_data =
- kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
+ kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
if (ahd->platform_data == NULL)
return (ENOMEM);
- memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
ahd->platform_data->irq = AHD_LINUX_NOIRQ;
ahd_lockinit(ahd);
ahd->seltime = (aic79xx_seltime & 0x3) << 4;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 10172a3af1b9..c4829d84b335 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -4464,10 +4464,9 @@ ahc_softc_init(struct ahc_softc *ahc)
ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
- ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
+ ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
if (ahc->scb_data == NULL)
return (ENOMEM);
- memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
}
return (0);
@@ -4780,10 +4779,10 @@ ahc_init_scbdata(struct ahc_softc *ahc)
SLIST_INIT(&scb_data->sg_maps);
/* Allocate SCB resources */
- scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+ scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
+ GFP_ATOMIC);
if (scb_data->scbarray == NULL)
return (ENOMEM);
- memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
/* Determine the number of hardware SCBs and initialize them */
@@ -7558,14 +7557,13 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
return;
}
}
- lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
- memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 88360116dbcb..a2f2c774cd6b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1214,10 +1214,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
ahc->platform_data =
- kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
+ kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
if (ahc->platform_data == NULL)
return (ENOMEM);
- memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
ahc->platform_data->irq = AHC_LINUX_NOIRQ;
ahc_lockinit(ahc);
ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index a70255413e7f..db87ece6edb2 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -1486,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index d1c37a386947..5ede3daa93dc 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -1014,7 +1014,6 @@ static struct platform_driver atari_scsi_driver = {
.remove = __exit_p(atari_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e90a3742f09d..cc3b9d3d6d40 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1079,22 +1079,18 @@ bfad_start_ops(struct bfad_s *bfad) {
int
bfad_worker(void *ptr)
{
- struct bfad_s *bfad;
- unsigned long flags;
-
- bfad = (struct bfad_s *)ptr;
-
- while (!kthread_should_stop()) {
+ struct bfad_s *bfad = ptr;
+ unsigned long flags;
- /* Send event BFAD_E_INIT_SUCCESS */
- bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+ if (kthread_should_stop())
+ return 0;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- bfad->bfad_tsk = NULL;
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ /* Send event BFAD_E_INIT_SUCCESS */
+ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
- break;
- }
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index f35792f7051c..f8d2478b11cc 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -57,9 +57,9 @@
*/
/* settings for DTC3181E card with only Mustek scanner attached */
-#define USLEEP_POLL 1
-#define USLEEP_SLEEP 20
-#define USLEEP_WAITLONG 500
+#define USLEEP_POLL msecs_to_jiffies(10)
+#define USLEEP_SLEEP msecs_to_jiffies(200)
+#define USLEEP_WAITLONG msecs_to_jiffies(5000)
#define AUTOPROBE_IRQ
@@ -723,7 +723,7 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
+#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
static struct isapnp_device_id id_table[] = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d9afc51af7d3..882744852aac 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -99,6 +99,7 @@ static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
+static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -221,6 +222,8 @@ MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
+module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -495,6 +498,10 @@ struct ipr_error_table_t ipr_error_table[] = {
"4061: Multipath redundancy level got better"},
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
"4060: Multipath redundancy level got worse"},
+ {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9083: Device raw mode enabled"},
+ {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9084: Device raw mode disabled"},
{0x07270000, 0, 0,
"Failure due to other device"},
{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -1462,7 +1469,8 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
- if (ioasc != IPR_IOASC_IOA_WAS_RESET)
+ if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@@ -2566,7 +2574,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
ipr_handle_log_data(ioa_cfg, hostrcb);
if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
- } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
+ } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
}
@@ -4491,11 +4500,83 @@ static struct device_attribute ipr_resource_type_attr = {
.show = ipr_show_resource_type
};
+/**
+ * ipr_show_raw_mode - Show the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_raw_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
+ else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_raw_mode - Change the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes consumed from the buffer (or a negative error)
+ **/
+static ssize_t ipr_store_raw_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res) {
+ if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+ res->raw_mode = simple_strtoul(buf, NULL, 10);
+ len = strlen(buf);
+ if (res->sdev)
+ sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
+ res->raw_mode ? "enabled" : "disabled");
+ } else
+ len = -EINVAL;
+ } else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_raw_mode_attr = {
+ .attr = {
+ .name = "raw_mode",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_raw_mode,
+ .store = ipr_store_raw_mode
+};
+
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
&ipr_resource_path_attr,
&ipr_device_id_attr,
&ipr_resource_type_attr,
+ &ipr_raw_mode_attr,
NULL,
};
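
Once applied, raw_mode appears as a per-device sysfs attribute backed by the show/store pair above. Below is a userspace sketch of toggling it from C; the 0:0:0:0 device path is a placeholder for illustration, not a guaranteed location.

#include <stdio.h>

int main(void)
{
	/* Assumed path layout: /sys/bus/scsi/devices/<H:C:T:L>/raw_mode */
	FILE *f = fopen("/sys/bus/scsi/devices/0:0:0:0/raw_mode", "w");

	if (!f) {
		perror("raw_mode");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}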
@@ -5379,9 +5460,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Mask the interrupt */
writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
-
- /* Clear the interrupt */
- writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
list_del(&ioa_cfg->reset_cmd->queue);
@@ -6150,6 +6228,13 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_NR_INIT_CMD_REQUIRED:
break;
+ case IPR_IOASC_IR_NON_OPTIMIZED:
+ if (res->raw_mode) {
+ res->raw_mode = 0;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ } else
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
default:
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
@@ -6289,6 +6374,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
+ if (res->raw_mode && ipr_is_af_dasd_device(res))
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
@@ -6402,7 +6489,6 @@ static struct scsi_host_template driver_template = {
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME,
- .no_write_same = 1,
.use_blk_tags = 1,
};
@@ -8318,7 +8404,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
- pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
LEAVE;
@@ -8326,6 +8411,32 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_reset_work - Pulse a PCIe fundamental reset
+ * @work: work struct
+ *
+ * Description: This pulses a warm reset to a slot.
+ *
+ **/
+static void ipr_reset_reset_work(struct work_struct *work)
+{
+ struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct pci_dev *pdev = ioa_cfg->pdev;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+ pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
+ pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->reset_cmd == ipr_cmd)
+ ipr_reset_ioa_job(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
* ipr_reset_slot_reset - Reset the PCI slot of the adapter.
* @ipr_cmd: ipr command struct
*
@@ -8337,12 +8448,11 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
+ queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
- ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
@@ -8480,6 +8590,122 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_quiesce_done - Complete IOA disconnect
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Freeze the adapter to complete quiesce processing
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioa_bringdown_done;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_cancel_hcam_done - Check for outstanding commands
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Ensure nothing is outstanding to the IOA and
+ * proceed with IOA disconnect. Otherwise reset the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_cmnd *loop_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int rc = IPR_RC_JOB_CONTINUE;
+ int count = 0;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_quiesce_done;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
+ count++;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ rc = IPR_RC_JOB_RETURN;
+ break;
+ }
+ spin_unlock(&hrrq->_lock);
+
+ if (count)
+ break;
+ }
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Cancel any outstanding HCAMs to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+ struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_cmnd *hcam_cmd;
+ struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
+
+ if (!hrrq->ioa_is_dead) {
+ if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
+ list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
+ continue;
+
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
+ cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
+ cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
+ cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
+ cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
+ cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
+ cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
+ cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
+ cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
+ cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_CANCEL_TIMEOUT);
+
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ break;
+ }
+ }
+ } else
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ LEAVE;
+ return rc;
+}
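
The CDB built in ipr_reset_cancel_hcam() splits the 64-bit HCAM IOARCB address across two ranges: high 32 bits in bytes 10-13, low 32 bits in bytes 2-5, each most-significant byte first. A standalone sketch of that packing follows; the 0xC0/0x01 opcode bytes correspond to the IPR_CANCEL_REQUEST and IPR_CANCEL_64BIT_IOARCB definitions added to ipr.h later in this patch.

#include <stdint.h>

/* Illustration only, not the driver's code. */
static void pack_cancel_cdb(uint8_t cdb[16], uint64_t addr)
{
	cdb[0] = 0xC0;		/* IPR_CANCEL_REQUEST */
	cdb[1] = 0x01;		/* IPR_CANCEL_64BIT_IOARCB */
	cdb[10] = addr >> 56;	/* high word, msb first */
	cdb[11] = addr >> 48;
	cdb[12] = addr >> 40;
	cdb[13] = addr >> 32;
	cdb[2] = addr >> 24;	/* low word, msb first */
	cdb[3] = addr >> 16;
	cdb[4] = addr >> 8;
	cdb[5] = addr;
}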
+
+/**
* ipr_reset_ucode_download_done - Microcode download completion
* @ipr_cmd: ipr command struct
*
@@ -8561,7 +8787,9 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ else if (shutdown_type != IPR_SHUTDOWN_NONE &&
!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
@@ -8917,13 +9145,15 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
- for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
- if (ioa_cfg->ipr_cmnd_list[i])
- dma_pool_free(ioa_cfg->ipr_cmd_pool,
- ioa_cfg->ipr_cmnd_list[i],
- ioa_cfg->ipr_cmnd_list_dma[i]);
+ if (ioa_cfg->ipr_cmnd_list) {
+ for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i])
+ dma_pool_free(ioa_cfg->ipr_cmd_pool,
+ ioa_cfg->ipr_cmnd_list[i],
+ ioa_cfg->ipr_cmnd_list_dma[i]);
- ioa_cfg->ipr_cmnd_list[i] = NULL;
+ ioa_cfg->ipr_cmnd_list[i] = NULL;
+ }
}
if (ioa_cfg->ipr_cmd_pool)
@@ -8973,26 +9203,25 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_free_all_resources - Free all allocated resources for an adapter.
- * @ipr_cmd: ipr command struct
+ * ipr_free_irqs - Free all allocated IRQs for the adapter.
+ * @ioa_cfg: ipr cfg struct
*
- * This function frees all allocated resources for the
+ * This function frees all allocated IRQs for the
* specified adapter.
*
* Return value:
* none
**/
-static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
- ENTER;
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
int i;
for (i = 0; i < ioa_cfg->nvectors; i++)
free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
+ &ioa_cfg->hrrq[i]);
} else
free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
@@ -9003,7 +9232,26 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
pci_disable_msix(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
}
+}
+/**
+ * ipr_free_all_resources - Free all allocated resources for an adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function frees all allocated resources for the
+ * specified adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ ENTER;
+ ipr_free_irqs(ioa_cfg);
+ if (ioa_cfg->reset_work_q)
+ destroy_workqueue(ioa_cfg->reset_work_q);
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -9823,6 +10071,14 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
+
+ ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
+ WQ_MEM_RECLAIM, host->host_no);
+
+ if (!ioa_cfg->reset_work_q) {
+ dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ goto out_free_irq;
+ }
} else
ioa_cfg->reset = ipr_reset_start_bist;
@@ -9834,6 +10090,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
out:
return rc;
+out_free_irq:
+ ipr_free_irqs(ioa_cfg);
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
@@ -9914,6 +10172,8 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
+ if (ioa_cfg->reset_work_q)
+ flush_workqueue(ioa_cfg->reset_work_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -10036,6 +10296,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10051,9 +10312,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
- ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
+ shutdown_type = IPR_SHUTDOWN_QUIESCE;
+
+ ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
+ ipr_free_irqs(ioa_cfg);
+ pci_disable_device(ioa_cfg->pdev);
+ }
}
static struct pci_device_id ipr_pci_table[] = {
@@ -10211,7 +10479,8 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index ec03b42fa2b9..47412cf4eaac 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.6.0"
-#define IPR_DRIVER_DATE "(November 16, 2012)"
+#define IPR_DRIVER_VERSION "2.6.1"
+#define IPR_DRIVER_DATE "(March 12, 2015)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -138,6 +138,7 @@
#define IPR_IOASC_BUS_WAS_RESET 0x06290000
#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000
#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000
+#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200
#define IPR_FIRST_DRIVER_IOASC 0x10000000
#define IPR_IOASC_IOA_WAS_RESET 0x10000001
@@ -196,6 +197,8 @@
/*
* Adapter Commands
*/
+#define IPR_CANCEL_REQUEST 0xC0
+#define IPR_CANCEL_64BIT_IOARCB 0x01
#define IPR_QUERY_RSRC_STATE 0xC2
#define IPR_RESET_DEVICE 0xC3
#define IPR_RESET_TYPE_SELECT 0x80
@@ -222,6 +225,7 @@
#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
@@ -518,6 +522,7 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
+#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@@ -1271,6 +1276,7 @@ struct ipr_resource_entry {
u8 del_from_ml:1;
u8 resetting_device:1;
u8 reset_occurred:1;
+ u8 raw_mode:1;
u32 bus; /* AKA channel */
u32 target; /* AKA id */
@@ -1402,7 +1408,8 @@ enum ipr_shutdown_type {
IPR_SHUTDOWN_NORMAL = 0x00,
IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40,
IPR_SHUTDOWN_ABBREV = 0x80,
- IPR_SHUTDOWN_NONE = 0x100
+ IPR_SHUTDOWN_NONE = 0x100,
+ IPR_SHUTDOWN_QUIESCE = 0x101,
};
struct ipr_trace_entry {
@@ -1536,6 +1543,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
+ struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
wait_queue_head_t msi_wait_q;
@@ -1587,6 +1595,7 @@ struct ipr_cmnd {
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
+ struct work_struct work;
void (*fast_done) (struct ipr_cmnd *);
void (*done) (struct ipr_cmnd *);
int (*job_step) (struct ipr_cmnd *);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 434e9037908e..9b81a34d7449 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -413,6 +413,9 @@ struct lpfc_vport {
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_fdmi_on;
+#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */
+#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */
+#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? */
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2f9b96826ac0..d65bd178d131 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -406,8 +406,13 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ char fwrev[FW_REV_STR_SIZE];
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
- return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
/**
@@ -4568,12 +4573,18 @@ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
/*
# lpfc_fdmi_on: controls FDMI support.
-# 0 = no FDMI support
-# 1 = support FDMI without attribute of hostname
-# 2 = support FDMI with attribute of hostname
-# Value range [0,2]. Default value is 0.
+#               Set                          NOT Set
+# bit 0 =      FDMI support                  no FDMI support
+#              LPFC_FDMI_SUPPORT just turns basic support on/off
+# bit 1 =      Register delay                no register delay (60 seconds)
+#              LPFC_FDMI_REG_DELAY 60 sec registration delay after FDMI login
+# bit 2 =      All attributes                Use an attribute subset
+#              LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes
+# Port attributes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103
+# HBA attributes subset:  1 thru 0xb OR all: 1 thru 0xc
+# Value range [0,7]. Default value is 0.
*/
-LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
+LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support");
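
With this change cfg_fdmi_on becomes a 3-bit mask rather than a 0-2 enum. A trivial standalone sketch decoding the bits, using the values the patch adds to lpfc.h:

#include <stdio.h>

#define LPFC_FDMI_SUPPORT	1	/* bit 0 */
#define LPFC_FDMI_REG_DELAY	2	/* bit 1 */
#define LPFC_FDMI_ALL_ATTRIB	4	/* bit 2 */

int main(void)
{
	unsigned int fdmi_on = 7;	/* example module parameter value */

	printf("support=%u delay=%u all_attrib=%u\n",
	       !!(fdmi_on & LPFC_FDMI_SUPPORT),
	       !!(fdmi_on & LPFC_FDMI_REG_DELAY),
	       !!(fdmi_on & LPFC_FDMI_ALL_ATTRIB));
	return 0;
}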
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a7bf359aa0c6..b705068079c0 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -3194,6 +3194,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
@@ -4179,6 +4180,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
+ case COMN_OPCODE_GET_PROFILE_CONFIG:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 928ef609f363..e557bcdbcb19 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -246,6 +246,7 @@ struct lpfc_sli_config_emb1_subsys {
#define lpfc_emb1_subcmnd_subsys_WORD word6
/* Subsystem COMN (0x01) OpCodes */
#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4
#define COMN_OPCODE_READ_OBJECT 0xAB
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 00665a5d92fd..587e3e962f2b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -284,6 +284,7 @@ void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
@@ -354,6 +355,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
+extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 61a32cd23f79..af129966bd11 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -555,7 +555,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
}
}
}
- if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
+ if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
goto nsout1;
Cnt -= sizeof (uint32_t);
}
@@ -641,7 +641,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
- be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x\n",
vport->fc_flag);
@@ -1074,11 +1074,48 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
- n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
- vport->phba->ModelName, fwrev, lpfc_release_version);
+ n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename);
+
+ /* Note :- OS name is "Linux" */
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname);
+
return n;
}
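
The reworked symbolic-name builder appends one field per snprintf() call and stops as soon as the running length reaches the buffer size, so the caller still sees the full would-be length even when the buffer truncates. A generic standalone sketch of that pattern, illustrative rather than the lpfc code (the model/firmware strings are placeholders):

#include <stdio.h>

static int build_name(char *buf, size_t size, const char *model, const char *fwrev)
{
	int n;

	n = snprintf(buf, size, "Emulex %s", model);
	if ((size_t)n >= size)
		return n;	/* already truncated, report desired length */
	n += snprintf(buf + n, size - n, " FV%s", fwrev);
	return n;
}

int main(void)
{
	char buf[16];

	printf("%d: %s\n", build_name(buf, sizeof(buf), "LPe12000", "2.01a12"), buf);
	return 0;
}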
+static uint32_t
+lpfc_find_map_node(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct Scsi_Host *shost;
+ uint32_t cnt = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_type & NLP_FABRIC)
+ continue;
+ if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
+ cnt++;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return cnt;
+}
+
/*
* lpfc_ns_cmd
* Description:
@@ -1177,7 +1214,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
switch (cmdcode) {
case SLI_CTNS_GID_FT:
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_GID_FT);
+ cpu_to_be16(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
if (vport->port_state < LPFC_NS_QRY)
vport->port_state = LPFC_NS_QRY;
@@ -1188,7 +1225,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_GFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_GFF_ID);
+ cpu_to_be16(SLI_CTNS_GFF_ID);
CtReq->un.gff.PortId = cpu_to_be32(context);
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
@@ -1196,7 +1233,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RFT_ID:
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFT_ID);
+ cpu_to_be16(SLI_CTNS_RFT_ID);
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
cmpl = lpfc_cmpl_ct_cmd_rft_id;
@@ -1205,7 +1242,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RNN_ID:
vport->ct_flags &= ~FC_CT_RNN_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RNN_ID);
+ cpu_to_be16(SLI_CTNS_RNN_ID);
CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
@@ -1215,7 +1252,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RSPN_ID:
vport->ct_flags &= ~FC_CT_RSPN_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RSPN_ID);
+ cpu_to_be16(SLI_CTNS_RSPN_ID);
CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
size = sizeof(CtReq->un.rspn.symbname);
CtReq->un.rspn.len =
@@ -1226,7 +1263,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RSNN_NN:
vport->ct_flags &= ~FC_CT_RSNN_NN;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RSNN_NN);
+ cpu_to_be16(SLI_CTNS_RSNN_NN);
memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
size = sizeof(CtReq->un.rsnn.symbname);
@@ -1238,14 +1275,14 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_DA_ID:
/* Implement DA_ID Nameserver request */
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_DA_ID);
+ cpu_to_be16(SLI_CTNS_DA_ID);
CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
cmpl = lpfc_cmpl_ct_cmd_da_id;
break;
case SLI_CTNS_RFF_ID:
vport->ct_flags &= ~FC_CT_RFF_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFF_ID);
+ cpu_to_be16(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_TYPE_FCP;
@@ -1299,7 +1336,6 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t latt;
latt = lpfc_els_chk_latt(vport);
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
irsp->ulpStatus, irsp->un.ulpWord[4], latt);
@@ -1310,29 +1346,49 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"ulpStatus: x%x, rid x%x\n",
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
- lpfc_ct_free_iocb(phba, cmdiocb);
- return;
+ goto fail_out;
}
ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto fail_out;
- if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0220 FDMI rsp failed Data: x%x\n",
be16_to_cpu(fdmi_cmd));
}
+fail_out:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+}
+
+static void
+lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ struct lpfc_nodelist *ndlp;
+
+ lpfc_cmpl_ct_cmd_fdmi(phba, cmdiocb, rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /*
+ * Need to cycle thru FDMI registration for discovery
+ * DHBA -> DPRT -> RHBA -> RPA
+ */
switch (be16_to_cpu(fdmi_cmd)) {
case SLI_MGMT_RHBA:
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
break;
- case SLI_MGMT_RPA:
- break;
-
case SLI_MGMT_DHBA:
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
break;
@@ -1341,12 +1397,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
break;
}
-
-fail_out:
- lpfc_ct_free_iocb(phba, cmdiocb);
- return;
}
+
int
lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
{
@@ -1355,18 +1408,28 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
uint32_t size;
- REG_HBA *rh;
- PORT_ENTRY *pe;
- REG_PORT_ATTRIBUTE *pab;
- ATTRIBUTE_BLOCK *ab;
- ATTRIBUTE_ENTRY *ae;
+ uint32_t rsp_size;
+ struct lpfc_fdmi_reg_hba *rh;
+ struct lpfc_fdmi_port_entry *pe;
+ struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_attr_block *ab = NULL;
+ struct lpfc_fdmi_attr_entry *ae;
+ struct lpfc_fdmi_attr_def *ad;
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+ if (ndlp == NULL) {
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return 0;
+ cmpl = lpfc_cmpl_ct_cmd_fdmi; /* cmd interface */
+ } else {
+ cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
+ }
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!mp)
goto fdmi_cmd_exit;
@@ -1375,7 +1438,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
goto fdmi_cmd_free_mp;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp)
goto fdmi_cmd_free_mpvirt;
@@ -1390,205 +1453,330 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0218 FDMI Request Data: x%x x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
- CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+ CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ /* First populate the CT_IU preamble */
memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+
+ CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
size = 0;
+ /* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
+ case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
{
lpfc_vpd_t *vp = &phba->vpd;
uint32_t i, j, incr;
- int len;
+ int len = 0;
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_MGMT_RHBA);
- CtReq->CommandResponse.bits.Size = 0;
- rh = (REG_HBA *) & CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ /* HBA Identifier */
memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- /* One entry (port) per adapter */
- rh->rpl.EntryCnt = be32_to_cpu(1);
- memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
- ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
+ sizeof(struct lpfc_name));
+
+ if (cmdcode == SLI_MGMT_RHBA) {
+ /* Registered Port List */
+ /* One entry (port) per adapter */
+ rh->rpl.EntryCnt = cpu_to_be32(1);
+ memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ /* point to the HBA attribute block */
+ size = 2 * sizeof(struct lpfc_name) +
+ FOURBYTES;
+ } else {
+ size = sizeof(struct lpfc_name);
+ }
+ ab = (struct lpfc_fdmi_attr_block *)
+ ((uint8_t *)rh + size);
ab->EntryCnt = 0;
+ size += FOURBYTES;
- /* Point to the beginning of the first HBA attribute
- entry */
+ /*
+ * Point to beginning of first HBA attribute entry
+ */
/* #1 HBA attribute entry */
- size += FOURBYTES;
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
- + sizeof (struct lpfc_name));
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RHBA_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
ab->EntryCnt++;
- size += FOURBYTES + sizeof (struct lpfc_name);
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #2 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
- strncpy(ae->un.Manufacturer, "Emulex Corporation", 64);
- len = strlen(ae->un.Manufacturer);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Manufacturer));
+ ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
+ strncpy(ae->un.Manufacturer, "Emulex Corporation",
+ sizeof(ae->un.Manufacturer));
+ len = strnlen(ae->un.Manufacturer,
+ sizeof(ae->un.Manufacturer));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #3 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
- strncpy(ae->un.SerialNumber, phba->SerialNumber, 64);
- len = strlen(ae->un.SerialNumber);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.SerialNumber));
+ ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
+ strncpy(ae->un.SerialNumber, phba->SerialNumber,
+ sizeof(ae->un.SerialNumber));
+ len = strnlen(ae->un.SerialNumber,
+ sizeof(ae->un.SerialNumber));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #4 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MODEL);
- strncpy(ae->un.Model, phba->ModelName, 256);
- len = strlen(ae->un.Model);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Model));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL);
+ strncpy(ae->un.Model, phba->ModelName,
+ sizeof(ae->un.Model));
+ len = strnlen(ae->un.Model, sizeof(ae->un.Model));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #5 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
- strncpy(ae->un.ModelDescription, phba->ModelDesc, 256);
- len = strlen(ae->un.ModelDescription);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.ModelDescription));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
+ strncpy(ae->un.ModelDescription, phba->ModelDesc,
+ sizeof(ae->un.ModelDescription));
+ len = strnlen(ae->un.ModelDescription,
+ sizeof(ae->un.ModelDescription));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + 8) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #6 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 8);
+ ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 8);
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
for (i = 0; i < 8; i++) {
j = (incr & 0xf);
if (j <= 9)
ae->un.HardwareVersion[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
+ (char)((uint8_t)0x30 +
+ (uint8_t)j);
else
ae->un.HardwareVersion[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
+ (char)((uint8_t)0x61 +
+ (uint8_t)(j - 10));
incr = (incr >> 4);
}
ab->EntryCnt++;
size += FOURBYTES + 8;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #7 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
- strncpy(ae->un.DriverVersion,
- lpfc_release_version, 256);
- len = strlen(ae->un.DriverVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.DriverVersion));
+ ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
+ strncpy(ae->un.DriverVersion, lpfc_release_version,
+ sizeof(ae->un.DriverVersion));
+ len = strnlen(ae->un.DriverVersion,
+ sizeof(ae->un.DriverVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #8 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
- strncpy(ae->un.OptionROMVersion,
- phba->OptionROMVersion, 256);
- len = strlen(ae->un.OptionROMVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OptionROMVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
+ strncpy(ae->un.OptionROMVersion, phba->OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
+ len = strnlen(ae->un.OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #9 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FirmwareVersion));
+ ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
1);
- len = strlen(ae->un.FirmwareVersion);
+ len = strnlen(ae->un.FirmwareVersion,
+ sizeof(ae->un.FirmwareVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #10 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
- sprintf(ae->un.OsNameVersion, "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
- len = strlen(ae->un.OsNameVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsNameVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
+ snprintf(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion),
+ "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
+ len = strnlen(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #11 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- ae->un.MaxCTPayloadLen = (65 * 4096);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType =
+ cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ ae->un.MaxCTPayloadLen = cpu_to_be32(LPFC_MAX_CT_SIZE);
ab->EntryCnt++;
size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
- ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
+ /*
+ * Currently switches don't seem to support the
+ * following extended HBA attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto hba_out;
+
+ /* #12 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
+ len = lpfc_vport_symbolic_node_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+hba_out:
+ ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
size = GID_REQUEST_SZ - 4 + size;
}
break;
+ case SLI_MGMT_RPRT:
case SLI_MGMT_RPA:
{
lpfc_vpd_t *vp;
struct serv_parm *hsp;
- int len;
+ int len = 0;
vp = &phba->vpd;
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_MGMT_RPA);
- CtReq->CommandResponse.bits.Size = 0;
- pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
- size = sizeof (struct lpfc_name) + FOURBYTES;
- memcpy((uint8_t *) & pab->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ if (cmdcode == SLI_MGMT_RPRT) {
+ rh = (struct lpfc_fdmi_reg_hba *)
+ &CtReq->un.PortID;
+ /* HBA Identifier */
+ memcpy(&rh->hi.PortName,
+ &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &rh->rpl.EntryCnt;
+ } else
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &CtReq->un.PortID;
+ size = sizeof(struct lpfc_name) + FOURBYTES;
+ memcpy((uint8_t *)&pab->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
pab->ab.EntryCnt = 0;
/* #1 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
- ae->un.SupportFC4Types[2] = 1;
- ae->un.SupportFC4Types[7] = 1;
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
pab->ab.EntryCnt++;
size += FOURBYTES + 32;
/* #2 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
ae->un.SupportSpeed = 0;
if (phba->lmt & LMT_16Gb)
ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
@@ -1602,15 +1790,19 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT;
if (phba->lmt & LMT_1Gb)
ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT;
+ ae->un.SupportSpeed =
+ cpu_to_be32(ae->un.SupportSpeed);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
/* #3 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- switch(phba->fc_linkspeed) {
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
break;
@@ -1633,93 +1825,273 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
break;
}
+ ae->un.PortSpeed = cpu_to_be32(ae->un.PortSpeed);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
/* #4 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- hsp = (struct serv_parm *) & vport->fc_sparam;
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ hsp = (struct serv_parm *)&vport->fc_sparam;
ae->un.MaxFrameSize =
- (((uint32_t) hsp->cmn.
- bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
+ (((uint32_t)hsp->cmn.
+ bbRcvSizeMsb) << 8) | (uint32_t)hsp->cmn.
bbRcvSizeLsb;
+ ae->un.MaxFrameSize =
+ cpu_to_be32(ae->un.MaxFrameSize);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
/* #5 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
- strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
- len = strlen((char *)ae->un.OsDeviceName);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsDeviceName));
+ ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
+ strncpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME,
+ sizeof(ae->un.OsDeviceName));
+ len = strnlen((char *)ae->un.OsDeviceName,
+ sizeof(ae->un.OsDeviceName));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
pab->ab.EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #6 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.HostName));
+ snprintf(ae->un.HostName, sizeof(ae->un.HostName), "%s",
+ init_utsname()->nodename);
+ ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
+ len = strnlen(ae->un.HostName,
+ sizeof(ae->un.HostName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen =
+ cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
- if (vport->cfg_fdmi_on == 2) {
- /* #6 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
- size);
- ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
- sprintf(ae->un.HostName, "%s",
- init_utsname()->nodename);
- len = strlen(ae->un.HostName);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen =
- be16_to_cpu(FOURBYTES + len);
- pab->ab.EntryCnt++;
- size += FOURBYTES + len;
- }
-
- pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
+ /*
+ * Currently switches don't seem to support the
+ * following extended Port attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto port_out;
+
+ /* #7 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #8 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.PortName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #9 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
+ len = lpfc_vport_symbolic_port_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #10 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #11 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
+ ae->un.SupportClass =
+ cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #12 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.FabricName, &vport->fabric_nodename,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #13 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 32;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #257 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #258 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
+ ae->un.PortState = lpfc_find_map_node(vport);
+ ae->un.PortState = cpu_to_be32(ae->un.PortState);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #259 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
+ ae->un.PortId = cpu_to_be32(vport->fc_myDID);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+port_out:
+ pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
/* Total size */
size = GID_REQUEST_SZ - 4 + size;
}
break;
+ case SLI_MGMT_GHAT:
+ case SLI_MGMT_GRPL:
+ rsp_size = FC_MAX_NS_RSP;
case SLI_MGMT_DHBA:
- CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
- CtReq->CommandResponse.bits.Size = 0;
- pe = (PORT_ENTRY *) & CtReq->un.PortID;
- memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ case SLI_MGMT_DHAT:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
break;
+ case SLI_MGMT_GPAT:
+ case SLI_MGMT_GPAS:
+ rsp_size = FC_MAX_NS_RSP;
case SLI_MGMT_DPRT:
- CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
- CtReq->CommandResponse.bits.Size = 0;
- pe = (PORT_ENTRY *) & CtReq->un.PortID;
- memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ case SLI_MGMT_DPA:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+ break;
+ case SLI_MGMT_GRHL:
+ size = GID_REQUEST_SZ - 4;
break;
+ default:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ "0298 FDMI cmdcode x%x not supported\n",
+ cmdcode);
+ goto fdmi_cmd_free_bmpvirt;
}
+ CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *) bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = size;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
-
- cmpl = lpfc_cmpl_ct_cmd_fdmi;
- /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ /*
+ * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
return 0;
- /* Decrement ndlp reference count to release ndlp reference held
+ /*
+ * Decrement ndlp reference count to release ndlp reference held
* for the failed command's callback function.
*/
lpfc_nlp_put(ndlp);
+fdmi_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
kfree(bmp);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5633e7dadc08..513edcb0c2da 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c66088d0fd2a..851e8efe364e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2243,8 +2243,7 @@ lpfc_adisc_done(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
@@ -3338,7 +3337,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
- maxretry = 0;
+ if (phba->link_flag != LS_LOOPBACK_MODE)
+ maxretry = 0;
+ else
+ maxretry = 2;
+
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
@@ -3701,6 +3704,11 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for
@@ -5198,7 +5206,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
port_state = vport->port_state;
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- vport->port_state = LPFC_FLOGI;
spin_unlock_irq(shost->host_lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
@@ -7173,7 +7180,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
- if (vport->cfg_fdmi_on) {
+ if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) {
/* If this is the first time, allocate an ndlp and initialize
* it. Otherwise, make sure the node is enabled and then do the
* login.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5452f1f4220e..2500f15d437f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3439,6 +3439,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3855,6 +3860,11 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@@ -4250,8 +4260,15 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0008 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ }
if (state != NLP_STE_UNUSED_NODE)
@@ -4276,9 +4293,12 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
- lpfc_nlp_put(ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
+ } else {
+ lpfc_nlp_put(ndlp);
+ }
return;
}
@@ -4515,7 +4535,17 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->context1 = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ mbox->context1 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl =
+ lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else
+ mbox->mbox_cmpl =
+ lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4741,6 +4771,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
!= NULL) {
rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
@@ -5070,8 +5105,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
@@ -5082,8 +5116,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
@@ -5484,18 +5517,22 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
*/
- if (vport->cfg_fdmi_on == 1)
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
- else
+ if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY)
mod_timer(&vport->fc_fdmitmo,
jiffies + msecs_to_jiffies(1000 * 60));
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
/* decrement the node reference count held for this callback
* function.
@@ -5650,6 +5687,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0007 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
GFP_KERNEL);
@@ -5684,9 +5728,9 @@ lpfc_nlp_release(struct kref *kref)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0279 lpfc_nlp_release: ndlp:x%p did %x "
- "usgmap:x%x refcnt:%d\n",
+ "usgmap:x%x refcnt:%d rpi:%x\n",
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
/* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 236259252379..37beb9dc1311 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -107,6 +107,7 @@ struct lpfc_sli_ct_request {
uint8_t ReasonCode;
uint8_t Explanation;
uint8_t VendorUnique;
+#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */
union {
uint32_t PortID;
@@ -170,6 +171,8 @@ struct lpfc_sli_ct_request {
} un;
};
+#define LPFC_MAX_CT_SIZE (60 * 4096)
+
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
@@ -1007,78 +1010,45 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
} un;
} ELS_PKT;
-/*
- * FDMI
- * HBA MAnagement Operations Command Codes
- */
-#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
-#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
-#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
-#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
-#define SLI_MGMT_RHBA 0x200 /* Register HBA */
-#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
-#define SLI_MGMT_RPRT 0x210 /* Register Port */
-#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
-#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
-#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+/******** FDMI ********/
-/*
- * Management Service Subtypes
- */
-#define SLI_CT_FDMI_Subtypes 0x10
+/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
/*
- * HBA Management Service Reject Code
+ * Registered Port List Format
*/
-#define REJECT_CODE 0x9 /* Unable to perform command request */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ uint32_t pe; /* Variable-length array */
+};
-/*
- * HBA Management Service Reject Reason Code
- * Please refer to the Reason Codes above
- */
-/*
- * HBA Attribute Types
- */
-#define NODE_NAME 0x1
-#define MANUFACTURER 0x2
-#define SERIAL_NUMBER 0x3
-#define MODEL 0x4
-#define MODEL_DESCRIPTION 0x5
-#define HARDWARE_VERSION 0x6
-#define DRIVER_VERSION 0x7
-#define OPTION_ROM_VERSION 0x8
-#define FIRMWARE_VERSION 0x9
-#define OS_NAME_VERSION 0xa
-#define MAX_CT_PAYLOAD_LEN 0xb
+/* Definitions for HBA / Port attribute entries */
-/*
- * Port Attrubute Types
- */
-#define SUPPORTED_FC4_TYPES 0x1
-#define SUPPORTED_SPEED 0x2
-#define PORT_SPEED 0x3
-#define MAX_FRAME_SIZE 0x4
-#define OS_DEVICE_NAME 0x5
-#define HOST_NAME 0x6
-
-union AttributesDef {
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
/* Structure is in Big Endian format */
- struct {
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- } bits;
- uint32_t word;
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
};
-/*
- * HBA Attribute Entry (8 - 260 bytes)
- */
-typedef struct {
- union AttributesDef ad;
+/* Attribute Entry */
+struct lpfc_fdmi_attr_entry {
union {
uint32_t VendorSpecific;
+ uint32_t SupportClass;
+ uint32_t SupportSpeed;
+ uint32_t PortSpeed;
+ uint32_t MaxFrameSize;
+ uint32_t MaxCTPayloadLen;
+ uint32_t PortState;
+ uint32_t PortId;
+ struct lpfc_name NodeName;
+ struct lpfc_name PortName;
+ struct lpfc_name FabricName;
+ uint8_t FC4Types[32];
uint8_t Manufacturer[64];
uint8_t SerialNumber[64];
uint8_t Model[256];
@@ -1087,97 +1057,115 @@ typedef struct {
uint8_t DriverVersion[256];
uint8_t OptionROMVersion[256];
uint8_t FirmwareVersion[256];
- struct lpfc_name NodeName;
- uint8_t SupportFC4Types[32];
- uint32_t SupportSpeed;
- uint32_t PortSpeed;
- uint32_t MaxFrameSize;
+ uint8_t OsHostName[256];
+ uint8_t NodeSymName[256];
uint8_t OsDeviceName[256];
uint8_t OsNameVersion[256];
- uint32_t MaxCTPayloadLen;
uint8_t HostName[256];
} un;
-} ATTRIBUTE_ENTRY;
+};
+
+#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
/*
* HBA Attribute Block
*/
-typedef struct {
- uint32_t EntryCnt; /* Number of HBA attribute entries */
- ATTRIBUTE_ENTRY Entry; /* Variable-length array */
-} ATTRIBUTE_BLOCK;
+struct lpfc_fdmi_attr_block {
+ uint32_t EntryCnt; /* Number of HBA attribute entries */
+ struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+};
/*
* Port Entry
*/
-typedef struct {
+struct lpfc_fdmi_port_entry {
struct lpfc_name PortName;
-} PORT_ENTRY;
+};
/*
* HBA Identifier
*/
-typedef struct {
+struct lpfc_fdmi_hba_ident {
struct lpfc_name PortName;
-} HBA_IDENTIFIER;
-
-/*
- * Registered Port List Format
- */
-typedef struct {
- uint32_t EntryCnt;
- PORT_ENTRY pe; /* Variable-length array */
-} REG_PORT_LIST;
+};
/*
* Register HBA(RHBA)
*/
-typedef struct {
- HBA_IDENTIFIER hi;
- REG_PORT_LIST rpl; /* variable-length array */
-/* ATTRIBUTE_BLOCK ab; */
-} REG_HBA;
+struct lpfc_fdmi_reg_hba {
+ struct lpfc_fdmi_hba_ident hi;
+ struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
+/* struct lpfc_fdmi_attr_block ab; */
+};
/*
* Register HBA Attributes (RHAT)
*/
-typedef struct {
+struct lpfc_fdmi_reg_hbaattr {
struct lpfc_name HBA_PortName;
- ATTRIBUTE_BLOCK ab;
-} REG_HBA_ATTRIBUTE;
+ struct lpfc_fdmi_attr_block ab;
+};
/*
* Register Port Attributes (RPA)
*/
-typedef struct {
+struct lpfc_fdmi_reg_portattr {
struct lpfc_name PortName;
- ATTRIBUTE_BLOCK ab;
-} REG_PORT_ATTRIBUTE;
+ struct lpfc_fdmi_attr_block ab;
+};
/*
- * Get Registered HBA List (GRHL) Accept Payload Format
+ * HBA Management Operations Command Codes
*/
-typedef struct {
- uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
- struct lpfc_name HBA_PortName; /* Variable-length array */
-} GRHL_ACC_PAYLOAD;
+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
+#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */
+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
+#define SLI_MGMT_RPRT 0x210 /* Register Port */
+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
+#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */
+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */
/*
- * Get Registered Port List (GRPL) Accept Payload Format
+ * HBA Attribute Types
*/
-typedef struct {
- uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
- PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
-} GRPL_ACC_PAYLOAD;
+#define RHBA_NODENAME 0x1 /* 8 byte WWNN */
+#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */
+#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */
+#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */
+#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */
+#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */
+#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */
+#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */
+#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */
+#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */
+#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */
+#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */
/*
- * Get Port Attributes (GPAT) Accept Payload Format
+ * Port Attribute Types
*/
-
-typedef struct {
- ATTRIBUTE_BLOCK pab;
-} GPAT_ACC_PAYLOAD;
-
+#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
+#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
+#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */
+#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */
+#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */
+#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */
+#define RPRT_NODENAME 0x7 /* 8 byte WWNN */
+#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */
+#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */
+#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */
+#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */
+#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */
+#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */
+#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
+#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
+#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
/*
* Begin HBA configuration parameters.
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f432ec180cf8..1813c45946f4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -3085,6 +3085,9 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
+#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5
+#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6
+#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7
#define lpfc_acqe_link_duplex_SHIFT 16
#define lpfc_acqe_link_duplex_MASK 0x000000FF
#define lpfc_acqe_link_duplex_WORD word0
@@ -3166,7 +3169,7 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_speed_SHIFT 24
#define lpfc_acqe_fc_la_speed_MASK 0x000000FF
#define lpfc_acqe_fc_la_speed_WORD word0
-#define LPFC_FC_LA_SPEED_UNKOWN 0x0
+#define LPFC_FC_LA_SPEED_UNKNOWN 0x0
#define LPFC_FC_LA_SPEED_1G 0x1
#define LPFC_FC_LA_SPEED_2G 0x2
#define LPFC_FC_LA_SPEED_4G 0x4
@@ -3244,6 +3247,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
+#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0b2c53af85c7..e8c8c1ecc1f5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1330,13 +1330,14 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irq(&phba->hbalock);
+
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
- lpfc_sli4_brdreset(phba);
lpfc_hba_down_post(phba);
- lpfc_sli4_post_status_check(phba);
lpfc_unblock_mgmt_io(phba);
- phba->link_state = LPFC_HBA_ERROR;
}
/**
@@ -1629,6 +1630,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
bool en_rn_msg = true;
+ struct temp_event temp_event_data;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1636,9 +1638,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
*/
if (pci_channel_offline(phba->pcidev))
return;
- /* If resets are disabled then leave the HBA alone and return */
- if (!phba->cfg_enable_hba_reset)
- return;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
@@ -1654,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
return;
lpfc_sli4_offline_eratt(phba);
break;
+
case LPFC_SLI_INTF_IF_TYPE_2:
pci_rd_rc1 = lpfc_readl(
phba->sli4_hba.u.if_type2.STATUSregaddr,
@@ -1668,15 +1668,27 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
- /* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
- "taking port offline\n");
+ "taking port offline Data: x%x x%x\n",
+ reg_err1, reg_err2);
+
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_CRIT_TEMP;
+ temp_event_data.data = 0xFFFFFFFF;
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
- break;
+ return;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
@@ -1693,6 +1705,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3145 Port Down: Provisioning\n");
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
+
/* Check port status register for function reset */
rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
en_rn_msg);
@@ -2759,9 +2775,19 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp))
+ if (NLP_CHK_NODE_ACT(ndlp)) {
ndlp->nlp_rpi =
lpfc_sli4_alloc_rpi(phba);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE,
+ "0009 rpi:%x DID:%x "
+ "flg:%x map:%x %p\n",
+ ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ ndlp->nlp_usg_map,
+ ndlp);
+ }
}
}
}
@@ -2925,8 +2951,18 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_printf_vlog(ndlp->vport,
+ KERN_INFO, LOG_NODE,
+ "0011 lpfc_offline: "
+ "ndlp:x%p did %x "
+ "usgmap:x%x rpi:%x\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_usg_map,
+ ndlp->nlp_rpi);
+
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ }
lpfc_unreg_rpi(vports[i], ndlp);
}
}
@@ -3241,12 +3277,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct Scsi_Host *shost;
int error = 0;
- if (dev != &phba->pcidev->dev)
+ if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template,
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
+ else
+ shost = scsi_host_alloc(&lpfc_template_s3,
+ sizeof(struct lpfc_vport));
+ }
if (!shost)
goto out;
@@ -3685,6 +3726,11 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
case LPFC_ASYNC_LINK_SPEED_10GBPS:
link_speed = LPFC_LINK_SPEED_10GHZ;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ link_speed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0483 Invalid link-attention link speed: x%x\n",
@@ -3756,46 +3802,55 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
switch (evt_code) {
case LPFC_TRAILER_CODE_LINK:
switch (speed_code) {
- case LPFC_EVT_CODE_LINK_NO_LINK:
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
port_speed = 0;
break;
- case LPFC_EVT_CODE_LINK_10_MBIT:
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
port_speed = 10;
break;
- case LPFC_EVT_CODE_LINK_100_MBIT:
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
port_speed = 100;
break;
- case LPFC_EVT_CODE_LINK_1_GBIT:
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
port_speed = 1000;
break;
- case LPFC_EVT_CODE_LINK_10_GBIT:
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
port_speed = 10000;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ port_speed = 20000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ port_speed = 25000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ port_speed = 40000;
+ break;
default:
port_speed = 0;
}
break;
case LPFC_TRAILER_CODE_FC:
switch (speed_code) {
- case LPFC_EVT_CODE_FC_NO_LINK:
+ case LPFC_FC_LA_SPEED_UNKNOWN:
port_speed = 0;
break;
- case LPFC_EVT_CODE_FC_1_GBAUD:
+ case LPFC_FC_LA_SPEED_1G:
port_speed = 1000;
break;
- case LPFC_EVT_CODE_FC_2_GBAUD:
+ case LPFC_FC_LA_SPEED_2G:
port_speed = 2000;
break;
- case LPFC_EVT_CODE_FC_4_GBAUD:
+ case LPFC_FC_LA_SPEED_4G:
port_speed = 4000;
break;
- case LPFC_EVT_CODE_FC_8_GBAUD:
+ case LPFC_FC_LA_SPEED_8G:
port_speed = 8000;
break;
- case LPFC_EVT_CODE_FC_10_GBAUD:
+ case LPFC_FC_LA_SPEED_10G:
port_speed = 10000;
break;
- case LPFC_EVT_CODE_FC_16_GBAUD:
+ case LPFC_FC_LA_SPEED_16G:
port_speed = 16000;
break;
default:
@@ -4044,18 +4099,21 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
char port_name;
char message[128];
uint8_t status;
+ uint8_t evt_type;
+ struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
+ struct Scsi_Host *shost;
+
+ evt_type = bf_get(lpfc_trailer_type, acqe_sli);
- /* special case misconfigured event as it contains data for all ports */
- if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2) ||
- (bf_get(lpfc_trailer_type, acqe_sli) !=
- LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
+ /* Special case Lancer */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2901 Async SLI event - Event Data1:x%08x Event Data2:"
"x%08x SLI Event Type:%d\n",
acqe_sli->event_data1, acqe_sli->event_data2,
- bf_get(lpfc_trailer_type, acqe_sli));
+ evt_type);
return;
}
@@ -4063,58 +4121,107 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
if (port_name == 0x00)
port_name = '?'; /* get port name is empty */
- misconfigured = (struct lpfc_acqe_misconfigured_event *)
+ switch (evt_type) {
+ case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3190 Over Temperature:%d Celsius- Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3191 Normal Temperature:%d Celsius - Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
+ misconfigured = (struct lpfc_acqe_misconfigured_event *)
&acqe_sli->event_data1;
- /* fetch the status for this port */
- switch (phba->sli4_hba.lnk_info.lnk_no) {
- case LPFC_LINK_NUMBER_0:
- status = bf_get(lpfc_sli_misconfigured_port0,
+ /* fetch the status for this port */
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ status = bf_get(lpfc_sli_misconfigured_port0,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_1:
- status = bf_get(lpfc_sli_misconfigured_port1,
+ break;
+ case LPFC_LINK_NUMBER_1:
+ status = bf_get(lpfc_sli_misconfigured_port1,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_2:
- status = bf_get(lpfc_sli_misconfigured_port2,
+ break;
+ case LPFC_LINK_NUMBER_2:
+ status = bf_get(lpfc_sli_misconfigured_port2,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_3:
- status = bf_get(lpfc_sli_misconfigured_port3,
+ break;
+ case LPFC_LINK_NUMBER_3:
+ status = bf_get(lpfc_sli_misconfigured_port3,
&misconfigured->theEvent);
- break;
- default:
- status = ~LPFC_SLI_EVENT_STATUS_VALID;
- break;
- }
+ break;
+ default:
+ status = ~LPFC_SLI_EVENT_STATUS_VALID;
+ break;
+ }
- switch (status) {
- case LPFC_SLI_EVENT_STATUS_VALID:
- return; /* no message if the sfp is okay */
- case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
- sprintf(message, "Optics faulted/incorrectly installed/not " \
- "installed - Reseat optics, if issue not "
- "resolved, replace.");
- break;
- case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
- sprintf(message,
- "Optics of two types installed - Remove one optic or " \
- "install matching pair of optics.");
- break;
- case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
- sprintf(message, "Incompatible optics - Replace with " \
+ switch (status) {
+ case LPFC_SLI_EVENT_STATUS_VALID:
+ return; /* no message if the sfp is okay */
+ case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
+ sprintf(message, "Optics faulted/incorrectly "
+ "installed/not installed - Reseat optics, "
+ "if issue not resolved, replace.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
+ sprintf(message,
+ "Optics of two types installed - Remove one "
+ "optic or install matching pair of optics.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
+ sprintf(message, "Incompatible optics - Replace with "
"compatible optics for card to function.");
+ break;
+ default:
+ /* firmware is reporting a status we don't know about */
+ sprintf(message, "Unknown event status x%02x", status);
+ break;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3176 Misconfigured Physical Port - "
+ "Port Name %c %s\n", port_name, message);
+ break;
+ case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3192 Remote DPort Test Initiated - "
+ "Event Data1:x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
break;
default:
- /* firmware is reporting a status we don't know about */
- sprintf(message, "Unknown event status x%02x", status);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3193 Async SLI event - Event Data1:x%08x Event Data2:"
+ "x%08x SLI Event Type:%d\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ evt_type);
break;
}
-
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "3176 Misconfigured Physical Port - "
- "Port Name %c %s\n", port_name, message);
}
/**
@@ -5183,6 +5290,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
return -ENODEV;
+ phba->temp_sensor_support = 1;
}
/* Create the bootstrap mailbox command */
@@ -7647,6 +7755,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy_els_rq;
}
}
+
+ /*
+ * Configure EQ delay multiplier for interrupt coalescing using
+ * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
+ */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
+ fcp_eqidx += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
return 0;
out_destroy_els_rq:
@@ -7953,7 +8069,7 @@ wait:
* up to 30 seconds. If the port doesn't respond, treat
* it as an error.
*/
- for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
+ for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
if (lpfc_readl(phba->sli4_hba.u.if_type2.
STATUSregaddr, &reg_data.word0)) {
rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 06241f590c1e..816f596cda60 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 5cc1103d811e..4cb9882af157 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -276,6 +276,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
+ uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
@@ -332,6 +333,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
+
+ nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
@@ -367,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -376,8 +379,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ /* lpfc_plogi_confirm_nport skips fabric DID, handle it here */
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ ndlp, NULL);
+ return 1;
+ }
+ if (nlp_portwwn != 0 &&
+ nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0143 PLOGI recv'd from DID: x%x "
+ "WWPN changed: old %llx new %llx\n",
+ ndlp->nlp_DID,
+ (unsigned long long)nlp_portwwn,
+ (unsigned long long)
+ wwn_to_u64(sp->portName.u.wwn));
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ /* rport needs to be unregistered first */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ break;
}
/* Check for Nport to NPort pt2pt protocol */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4f9222eb2266..cb73cf9e9ba5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1130,6 +1130,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
}
/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
+
+/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -1264,6 +1283,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
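+ /* copy the FCP command into the immediate data area of the IOCB */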
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@@ -4127,24 +4147,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
- *
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
- int i, j;
- for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
- i += sizeof(uint32_t), j++) {
- ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
- }
-}
-
-/**
* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
@@ -4223,9 +4225,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
- if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
- lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@@ -5118,9 +5117,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata) {
+ if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rport failure: rdata x%p\n", rdata);
+ "0798 Device Reset rport failure: rdata x%p\n",
+ rdata);
return FAILED;
}
pnode = rdata->pnode;
@@ -5202,10 +5202,12 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0722 Target Reset rport failure: rdata x%p\n", rdata);
- spin_lock_irq(shost->host_lock);
- pnode->nlp_flag &= ~NLP_NPR_ADISC;
- pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(shost->host_lock);
+ if (pnode) {
+ spin_lock_irq(shost->host_lock);
+ pnode->nlp_flag &= ~NLP_NPR_ADISC;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
return FAST_IO_FAIL;
@@ -5857,6 +5859,31 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
return false;
}
+struct scsi_host_template lpfc_template_s3 = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_hba_attrs,
+ .max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0389ac1e7b83..474e30cdee6e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 207a43d952fa..56f73682d4bd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -918,12 +918,16 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+ !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
ndlp = piocbq->context_un.ndlp;
- else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = piocbq->context_un.ndlp;
- else
+ } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
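+ /* loopback test IOs carry no remote node (ndlp) context */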
+ if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ ndlp = NULL;
+ else
+ ndlp = piocbq->context_un.ndlp;
+ } else {
ndlp = piocbq->context1;
+ }
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
start_sglq = sglq;
@@ -2213,6 +2217,46 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else
mempool_free(pmb, phba->mbox_mem_pool);
}
+ /**
+ * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the unreg rpi mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. An additional refrenece is put on the ndlp to prevent
+ * lpfc_nlp_release from freeing the rpi bit in the bitmask before
+ * the unreg mailbox command completes, this routine puts the
+ * reference back.
+ *
+ **/
+void
+lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = pmb->context1;
+ if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ if (ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0010 UNREG_LOGIN vpi:%x "
+ "rpi:%x DID:%x map:%x %p\n",
+ vport->vpi, ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map, ndlp);
+
+ lpfc_nlp_put(ndlp);
+ }
+ }
+ }
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
/**
* lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
@@ -12842,7 +12886,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
-lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
+lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
@@ -12959,11 +13003,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
LPFC_EQE_SIZE);
bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
- /* Calculate delay multiper from maximum interrupt per second */
- if (imax > LPFC_DMULT_CONST)
- dmult = 0;
- else
- dmult = LPFC_DMULT_CONST/imax - 1;
+ /* don't setup delay multiplier using EQ_CREATE */
+ dmult = 0;
bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
dmult);
switch (eq->entry_count) {
@@ -15662,14 +15703,14 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
unsigned long iflag;
- max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_limit = phba->sli4_hba.next_rpi;
-
/*
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
spin_lock_irqsave(&phba->hbalock, iflag);
+ max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ rpi_limit = phba->sli4_hba.next_rpi;
+
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15678,6 +15719,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0001 rpi:%x max:%x lim:%x\n",
+ (int) rpi, max_rpi, rpi_limit);
/*
* Don't try to allocate more rpi header regions if the device limit
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 4a01452415cf..7fe99ff80846 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -80,6 +80,7 @@ struct lpfc_iocbq {
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
+#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 22ceb2b05ba1..6eca3b8124d3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -671,7 +671,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 89413add2252..c37bb9f91c3b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "10.4.8000.0."
+#define LPFC_DRIVER_VERSION "10.5.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved."
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 1e85c07e3b62..d64a769b8155 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -483,7 +483,6 @@ static struct platform_driver mac_scsi_driver = {
.remove = __exit_p(mac_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 113e6c9826a1..33f60c92e20e 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -18,6 +18,9 @@ config SCSI_QLA_FC
2322, 6322 ql2322_fw.bin
24xx, 54xx ql2400_fw.bin
25xx ql2500_fw.bin
+ 2031 ql2600_fw.bin
+ 8031 ql8300_fw.bin
+ 27xx ql2700_fw.bin
Upon request, the driver caches the firmware image until
the driver is unloaded.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d77fe43793b6..0e6ee3ca30e6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,9 +11,9 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017d | 0x0144,0x0146 |
+ * | Module Init and Probe | 0x017f | 0x0146 |
* | | | 0x015b-0x0160 |
- * | | | 0x016e-0x0170 |
+ * | | | 0x016e-0x0170 |
* | Mailbox commands | 0x118d | 0x1115-0x1116 |
* | | | 0x111a-0x111b |
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
@@ -60,7 +60,7 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd213 | 0xd011-0xd017 |
+ * | Misc | 0xd300 | 0xd016-0xd017 |
* | | | 0xd021,0xd024 |
* | | | 0xd025,0xd029 |
* | | | 0xd02a,0xd02e |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5f6b2960cccb..e86201d3b8c6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2163,7 +2163,7 @@ struct ct_fdmi_hba_attr {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
uint8_t serial_num[32];
- uint8_t model[16];
+ uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[32];
uint8_t driver_version[32];
@@ -2184,9 +2184,9 @@ struct ct_fdmiv2_hba_attr {
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[32];
+ uint8_t manufacturer[64];
uint8_t serial_num[32];
- uint8_t model[16];
+ uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[16];
uint8_t driver_version[32];
@@ -2252,7 +2252,7 @@ struct ct_fdmiv2_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
- uint8_t host_name[32];
+ uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
@@ -2283,7 +2283,7 @@ struct ct_fdmi_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
- uint8_t host_name[32];
+ uint8_t host_name[256];
} a;
};
@@ -3132,7 +3132,8 @@ struct qla_hw_data {
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha) || IS_QLA27XX(ha))
-#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
IS_QLA27XX(ha))
@@ -3300,6 +3301,8 @@ struct qla_hw_data {
#define RISC_RDY_AFT_RESET 3
#define RISC_SRAM_DUMP_CMPL 4
#define RISC_EXT_MEM_DUMP_CMPL 5
+#define ISP_MBX_RDY 6
+#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
int prev_minidump_failed;
dma_addr_t eft_dma;
@@ -3587,6 +3590,7 @@ typedef struct scsi_qla_host {
#define VP_BIND_NEEDED 2
#define VP_DELETE_NEEDED 3
#define VP_SCR_NEEDED 4 /* State Change Request registration */
+#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */
atomic_t vp_state;
#define VP_OFFLINE 0
#define VP_ACTIVE 1
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5bb57c5282c9..285cb204f300 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1121,7 +1121,7 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-static inline void
+static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
@@ -1130,6 +1130,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
uint32_t cnt, d2;
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
+ int rval = QLA_SUCCESS;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1142,26 +1143,57 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
udelay(10);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
+ "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status),
+ (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+
WRT_REG_DWORD(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
+
/* Wait for firmware to complete NVRAM accesses. */
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000 ; cnt && d2; cnt--) {
- udelay(5);
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
}
+ if (rval == QLA_SUCCESS)
+ set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
+ "HCCR: 0x%x, MailBox0 Status 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->mailbox0));
+
/* Wait for soft-reset to complete. */
d2 = RD_REG_DWORD(&reg->ctrl_status);
- for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
- udelay(5);
- d2 = RD_REG_DWORD(&reg->ctrl_status);
+ for (cnt = 0; cnt < 6000000; cnt++) {
barrier();
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(5);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
+ "HCCR: 0x%x, Soft Reset status: 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -1190,16 +1222,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 6000000 ; cnt && d2; cnt--) {
- udelay(5);
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
}
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
+ "Host Risc 0x%x, mailbox0 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_WORD(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
+ "Driver in %s mode\n",
+ IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
+
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
+
+ return rval;
}
static void
@@ -2243,8 +2291,11 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
- /* 20 seconds for loop down. */
- min_wait = 20;
+ /* Time to wait for loop down */
+ if (IS_P3P_TYPE(ha))
+ min_wait = 30;
+ else
+ min_wait = 20;
/*
* Firmware should take at most one RATOV to login, plus 5 seconds for
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a04a1b1f7f32..6dc14cd782b2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -756,11 +756,21 @@ skip_rio:
/*
* In case of loop down, restore WWPN from
* NVRAM in case of FA-WWPN capable ISP
+ * Restore for Physical Port only
*/
- if (ha->flags.fawwpn_enabled) {
- void *wwpn = ha->init_cb->port_name;
+ if (!vha->vp_idx) {
+ if (ha->flags.fawwpn_enabled) {
+ void *wwpn = ha->init_cb->port_name;
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose,
+ vha, 0x0144, "LOOP DOWN detected,"
+ "restore WWPN %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ clear_bit(VP_CONFIG_OK, &vha->vp_flags);
}
vha->device_flags |= DFLG_NO_CABLE;
@@ -947,6 +957,7 @@ skip_rio:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(VP_CONFIG_OK, &vha->vp_flags);
qlt_async_event(mb[0], vha, mb);
break;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 72971daa2552..02b1c1c5355b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -33,7 +33,7 @@
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
- int rval;
+ int rval, i;
unsigned long flags = 0;
device_reg_t *reg;
uint8_t abort_active;
@@ -43,10 +43,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
+ uint16_t __iomem *mbx_reg;
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
if (ha->pdev->error_state > pci_channel_io_frozen) {
@@ -376,6 +378,18 @@ mbx_done:
ql_dbg(ql_dbg_disc, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+
+ ql_dbg(ql_dbg_disc, vha, 0x1115,
+ "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
+ RD_REG_DWORD(&reg->isp24.host_status),
+ ha->fw_dump_cap_flags,
+ RD_REG_DWORD(&reg->isp24.ictrl),
+ RD_REG_DWORD(&reg->isp24.istatus));
+
+ mbx_reg = &reg->isp24.mailbox0;
+ for (i = 0; i < 6; i++)
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@@ -2838,7 +2852,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw))
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -2846,7 +2860,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mcp->mb[0] = MBC_WRITE_SERDES;
mcp->mb[1] = addr;
- mcp->mb[2] = data & 0xff;
+ if (IS_QLA2031(vha->hw))
+ mcp->mb[2] = data & 0xff;
+ else
+ mcp->mb[2] = data;
+
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
@@ -2872,7 +2890,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw))
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -2887,7 +2905,10 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
- *data = mcp->mb[1] & 0xff;
+ if (IS_QLA2031(vha->hw))
+ *data = mcp->mb[1] & 0xff;
+ else
+ *data = mcp->mb[1];
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1186,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index ca3804e34833..cc94192511cf 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -306,19 +306,25 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
- if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
- /* VP acquired. complete port configuration */
- ql_dbg(ql_dbg_dpc, vha, 0x4014,
- "Configure VP scheduled.\n");
- qla24xx_configure_vp(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4015,
- "Configure VP end.\n");
- return 0;
+ /* Check if Fw is ready to configure VP first */
+ if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
+ if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+ /* VP acquired. complete port configuration */
+ ql_dbg(ql_dbg_dpc, vha, 0x4014,
+ "Configure VP scheduled.\n");
+ qla24xx_configure_vp(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4015,
+ "Configure VP end.\n");
+ return 0;
+ }
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5319b3cb219e..7462dd70b150 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5834,3 +5834,6 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
+MODULE_FIRMWARE(FW_FILE_ISP2031);
+MODULE_FIRMWARE(FW_FILE_ISP8031);
+MODULE_FIRMWARE(FW_FILE_ISP27XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b656a05613e8..028e8c8a7de9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1718,13 +1718,16 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
uint16_t orig_led_cfg[6];
uint32_t led_10_value, led_43_value;
- if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
return;
if (!ha->beacon_blink_led)
return;
- if (IS_QLA2031(ha)) {
+ if (IS_QLA27XX(ha)) {
+ qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+ qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+ } else if (IS_QLA2031(ha)) {
led_select_value = qla83xx_select_led_port(ha);
qla83xx_wr_reg(vha, led_select_value, 0x40000230);
@@ -1811,7 +1814,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
- if (IS_QLA2031(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1848,7 +1851,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->beacon_blink_led = 0;
- if (IS_QLA2031(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto set_fw_options;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index a8c0c7362e48..962cb89fe0ae 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -190,7 +190,7 @@ static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
uint offset, uint32_t data, void *buf)
{
- __iomem void *window = reg + offset;
+ __iomem void *window = (void __iomem *)reg + offset;
if (buf) {
WRT_REG_DWORD(window, data);
@@ -219,6 +219,8 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
if (buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
+ "Skipping entry %d\n", ent->hdr.entry_type);
}
static int
@@ -784,6 +786,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01b,
"%s: len=%lx\n", __func__, *len);
+
+ if (buf) {
+ ql_log(ql_log_warn, vha, 0xd015,
+ "Firmware dump saved to temp buffer (%ld/%p)\n",
+ vha->host_no, vha->hw->fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
}
static void
@@ -938,6 +947,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
else if (!vha->hw->fw_dump_template)
ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+ else if (vha->hw->fw_dumped)
+ ql_log(ql_log_warn, vha, 0xd300,
+ "Firmware has been previously dumped (%p),"
+ " -- ignoring request\n", vha->hw->fw_dump);
else
qla27xx_execute_fwdt_template(vha);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d88b86214ec5..2ed9ab90a455 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.16-k"
+#define QLA2XXX_VERSION "8.07.00.18-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c9c3b579eece..3833bf59fb66 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -972,18 +972,24 @@ EXPORT_SYMBOL(scsi_report_opcode);
* Description: Gets a reference to the scsi_device and increments the use count
* of the underlying LLDD module. You must hold host_lock of the
* parent Scsi_Host or already have a reference when calling this.
+ *
+ * This will fail if a device is deleted or cancelled, or when the LLD module
+ * is in the process of being unloaded.
*/
int scsi_device_get(struct scsi_device *sdev)
{
- if (sdev->sdev_state == SDEV_DEL)
- return -ENXIO;
+ if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
+ goto fail;
if (!get_device(&sdev->sdev_gendev))
- return -ENXIO;
- /* We can fail try_module_get if we're doing SCSI operations
- * from module exit (like cache flush) */
- __module_get(sdev->host->hostt->module);
-
+ goto fail;
+ if (!try_module_get(sdev->host->hostt->module))
+ goto fail_put_device;
return 0;
+
+fail_put_device:
+ put_device(&sdev->sdev_gendev);
+fail:
+ return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9c0a520d933c..60aae01caa89 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1570,16 +1570,15 @@ EXPORT_SYMBOL(scsi_add_device);
void scsi_rescan_device(struct device *dev)
{
- if (!dev->driver)
- return;
-
- if (try_module_get(dev->driver->owner)) {
+ device_lock(dev);
+ if (dev->driver && try_module_get(dev->driver->owner)) {
struct scsi_driver *drv = to_scsi_driver(dev->driver);
if (drv->rescan)
drv->rescan(dev);
module_put(dev->driver->owner);
}
+ device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5d6f348eb3d8..24eaaf66af71 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -265,6 +265,7 @@ static const struct {
{ FC_PORTSPEED_40GBIT, "40 Gbit" },
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
+ { FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6b78476d04bb..dcc42446f58a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -564,10 +564,12 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
+static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
struct scsi_disk *sdkp = NULL;
+ mutex_lock(&sd_ref_mutex);
+
if (disk->private_data) {
sdkp = scsi_disk(disk);
if (scsi_device_get(sdkp->device) == 0)
@@ -575,27 +577,6 @@ static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
else
sdkp = NULL;
}
- return sdkp;
-}
-
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp;
-
- mutex_lock(&sd_ref_mutex);
- sdkp = __scsi_disk_get(disk);
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
-{
- struct scsi_disk *sdkp;
-
- mutex_lock(&sd_ref_mutex);
- sdkp = dev_get_drvdata(dev);
- if (sdkp)
- sdkp = __scsi_disk_get(sdkp->disk);
mutex_unlock(&sd_ref_mutex);
return sdkp;
}
@@ -610,8 +591,6 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
-
-
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -1525,12 +1504,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
static void sd_rescan(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- if (sdkp) {
- revalidate_disk(sdkp->disk);
- scsi_disk_put(sdkp);
- }
+ revalidate_disk(sdkp->disk);
}
@@ -2235,11 +2211,11 @@ got_data:
{
char cap_str_2[10], cap_str_10[10];
- u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
- string_get_size(sz, STRING_UNITS_2, cap_str_2,
- sizeof(cap_str_2));
- string_get_size(sz, STRING_UNITS_10, cap_str_10,
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
if (sdkp->first_scan || old_capacity != sdkp->capacity) {
@@ -3149,13 +3125,13 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
*/
static void sd_shutdown(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp)
return; /* this can happen */
if (pm_runtime_suspended(dev))
- goto exit;
+ return;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3166,14 +3142,11 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
-
-exit:
- scsi_disk_put(sdkp);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
if (!sdkp)
@@ -3199,7 +3172,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
done:
- scsi_disk_put(sdkp);
return ret;
}
@@ -3215,18 +3187,13 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
- int ret = 0;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp->device->manage_start_stop)
- goto done;
+ return 0;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
- ret = sd_start_stop_device(sdkp, 1);
-
-done:
- scsi_disk_put(sdkp);
- return ret;
+ return sd_start_stop_device(sdkp, 1);
}
/**
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index efc6e446b6c8..d9dad90344d5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -308,11 +308,16 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static u32 max_outstanding_req_per_channel;
+
+static int storvsc_vcpus_per_sub_channel = 4;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
/*
* Timeout in seconds for all devices managed by this driver.
*/
@@ -320,7 +325,6 @@ static int storvsc_timeout = 180;
static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-#define STORVSC_MAX_IO_REQUESTS 200
static void storvsc_on_channel_callback(void *context);
@@ -347,7 +351,10 @@ struct storvsc_cmd_request {
/* Synchronize the request/response if needed */
struct completion wait_event;
- struct hv_multipage_buffer data_buffer;
+ struct vmbus_channel_packet_multipage_buffer mpb;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+
struct vstor_packet vstor_packet;
};
@@ -373,6 +380,10 @@ struct storvsc_device {
unsigned char path_id;
unsigned char target_id;
+ /*
+ * Maximum I/O transfer size the device can support.
+ */
+ u32 max_transfer_bytes;
/* Used for vsc/vsp channel reset process */
struct storvsc_cmd_request init_request;
struct storvsc_cmd_request reset_request;
@@ -618,19 +629,6 @@ cleanup:
return NULL;
}
-/* Disgusting wrapper functions */
-static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
-{
- void *addr = kmap_atomic(sg_page(sgl + idx));
- return (unsigned long)addr;
-}
-
-static inline void sg_kunmap_atomic(unsigned long addr)
-{
- kunmap_atomic((void *)addr);
-}
-
-
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
@@ -645,32 +643,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long dest_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_dest_sgl;
+ struct scatterlist *cur_src_sgl;
local_irq_save(flags);
-
+ cur_dest_sgl = orig_sgl;
+ cur_src_sgl = bounce_sgl;
for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ dest_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl)) +
+ cur_dest_sgl->offset;
dest = dest_addr;
- destlen = orig_sgl[i].length;
+ destlen = cur_dest_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)kmap_atomic(
+ sg_page(cur_src_sgl));
while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+ src = bounce_addr + cur_src_sgl->offset;
+ srclen = cur_src_sgl->length - cur_src_sgl->offset;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].offset += copylen;
+ cur_src_sgl->offset += copylen;
destlen -= copylen;
dest += copylen;
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ if (cur_src_sgl->offset == cur_src_sgl->length) {
/* full */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
j++;
/*
@@ -684,21 +688,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
/*
* We are done; cleanup and return.
*/
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr -
+ cur_dest_sgl->offset));
local_irq_restore(flags);
return total_copied;
}
/* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ if (destlen || i != orig_sgl_count - 1) {
+ cur_src_sgl = sg_next(cur_src_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_src_sgl));
+ }
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
}
}
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+ cur_dest_sgl = sg_next(cur_dest_sgl);
}
local_irq_restore(flags);
@@ -719,48 +729,62 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long src_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_src_sgl;
+ struct scatterlist *cur_dest_sgl;
local_irq_save(flags);
+ cur_src_sgl = orig_sgl;
+ cur_dest_sgl = bounce_sgl;
+
for (i = 0; i < orig_sgl_count; i++) {
- src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ src_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_src_sgl)) +
+ cur_src_sgl->offset;
src = src_addr;
- srclen = orig_sgl[i].length;
+ srclen = cur_src_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl));
while (srclen) {
/* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
+ dest = bounce_addr + cur_dest_sgl->length;
+ destlen = PAGE_SIZE - cur_dest_sgl->length;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].length += copylen;
+ cur_dest_sgl->length += copylen;
srclen -= copylen;
src += copylen;
- if (bounce_sgl[j].length == PAGE_SIZE) {
+ if (cur_dest_sgl->length == PAGE_SIZE) {
/* full..move to next entry */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
+ bounce_addr = 0;
j++;
+ }
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
-
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
+ /* if we need to use another bounce buffer */
+ if (srclen && bounce_addr == 0) {
+ cur_dest_sgl = sg_next(cur_dest_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_dest_sgl));
}
+
}
- sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+ cur_src_sgl = sg_next(cur_src_sgl);
}
+ if (bounce_addr)
+ kunmap_atomic((void *)bounce_addr);
+
local_irq_restore(flags);
return total_copied;
@@ -970,6 +994,8 @@ static int storvsc_channel_init(struct hv_device *device)
STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
process_sub_channels = true;
}
+ stor_device->max_transfer_bytes =
+ vstor_packet->storage_channel_properties.max_transfer_bytes;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
@@ -1080,6 +1106,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
struct Scsi_Host *host;
struct storvsc_device *stor_dev;
struct hv_device *dev = host_dev->dev;
+ u32 payload_sz = cmd_request->payload_sz;
+ void *payload = cmd_request->payload;
stor_dev = get_in_stor_device(dev);
host = stor_dev->host;
@@ -1109,10 +1137,14 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
sense_hdr.ascq);
scsi_set_resid(scmnd,
- cmd_request->data_buffer.len -
+ cmd_request->payload->range.len -
vm_srb->data_transfer_length);
scmnd->scsi_done(scmnd);
+
+ if (payload_sz >
+ sizeof(struct vmbus_channel_packet_multipage_buffer))
+ kfree(payload);
}
static void storvsc_on_io_completion(struct hv_device *device,
@@ -1314,7 +1346,7 @@ static int storvsc_dev_remove(struct hv_device *device)
}
static int storvsc_do_io(struct hv_device *device,
- struct storvsc_cmd_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
@@ -1346,19 +1378,20 @@ static int storvsc_do_io(struct hv_device *device,
vstor_packet->vm_srb.data_transfer_length =
- request->data_buffer.len;
+ request->payload->range.len;
vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
- if (request->data_buffer.len) {
- ret = vmbus_sendpacket_multipagebuffer(outgoing_channel,
- &request->data_buffer,
+ if (request->payload->range.len) {
+
+ ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
+ request->payload, request->payload_sz,
vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request);
} else {
- ret = vmbus_sendpacket(device->channel, vstor_packet,
+ ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request,
@@ -1376,7 +1409,6 @@ static int storvsc_do_io(struct hv_device *device,
static int storvsc_device_configure(struct scsi_device *sdevice)
{
- scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
@@ -1526,6 +1558,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct scatterlist *cur_sgl;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+ u32 length;
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
/*
@@ -1579,46 +1615,71 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
- cmd_request->data_buffer.len = scsi_bufflen(scmnd);
- if (scsi_sg_count(scmnd)) {
- sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
+ sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
+
+ length = scsi_bufflen(scmnd);
+ payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload_sz = sizeof(cmd_request->mpb);
+ if (sg_count) {
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
- create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd),
+ create_bounce_buffer(sgl, sg_count,
+ length,
vm_srb->data_in);
if (!cmd_request->bounce_sgl)
return SCSI_MLQUEUE_HOST_BUSY;
cmd_request->bounce_sgl_count =
- ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
- PAGE_SHIFT;
+ ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
if (vm_srb->data_in == WRITE_TYPE)
copy_to_bounce_buffer(sgl,
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
+ cmd_request->bounce_sgl, sg_count);
sgl = cmd_request->bounce_sgl;
sg_count = cmd_request->bounce_sgl_count;
}
- cmd_request->data_buffer.offset = sgl[0].offset;
- for (i = 0; i < sg_count; i++)
- cmd_request->data_buffer.pfn_array[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+
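+ /* SG list exceeds the embedded MPB; allocate a descriptor with room for every PFN */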
+ payload_sz = (sg_count * sizeof(void *) +
+ sizeof(struct vmbus_packet_mpb_array));
+ payload = kmalloc(payload_sz, GFP_ATOMIC);
+ if (!payload) {
+ if (cmd_request->bounce_sgl_count)
+ destroy_bounce_buffer(
+ cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ }
+
+ payload->range.len = length;
+ payload->range.offset = sgl[0].offset;
+
+ cur_sgl = sgl;
+ for (i = 0; i < sg_count; i++) {
+ payload->range.pfn_array[i] =
+ page_to_pfn(sg_page((cur_sgl)));
+ cur_sgl = sg_next(cur_sgl);
+ }
} else if (scsi_sglist(scmnd)) {
- cmd_request->data_buffer.offset =
+ payload->range.len = length;
+ payload->range.offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- cmd_request->data_buffer.pfn_array[0] =
+ payload->range.pfn_array[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
+ cmd_request->payload = payload;
+ cmd_request->payload_sz = payload_sz;
+
/* Invokes the vsc to start an IO */
ret = storvsc_do_io(dev, cmd_request);
@@ -1646,12 +1707,8 @@ static struct scsi_host_template scsi_driver = {
.eh_timed_out = storvsc_eh_timed_out,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
- .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
.this_id = -1,
- /* no use setting to 0 since ll_blk_rw reset it to 1 */
- /* currently 32 */
- .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- .use_clustering = DISABLE_CLUSTERING,
+ .use_clustering = ENABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
@@ -1686,6 +1743,7 @@ static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
int ret;
+ int num_cpus = num_online_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@@ -1694,6 +1752,7 @@ static int storvsc_probe(struct hv_device *device,
int max_luns_per_target;
int max_targets;
int max_channels;
+ int max_sub_channels = 0;
/*
* Based on the windows host we are running on,
@@ -1719,12 +1778,18 @@ static int storvsc_probe(struct hv_device *device,
max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
max_targets = STORVSC_MAX_TARGETS;
max_channels = STORVSC_MAX_CHANNELS;
+ /*
+ * On Windows 8 and above, we support sub-channels for storage.
+ * The number of sub-channels offered is based on the number of
+ * VCPUs in the guest.
+ */
+ max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
break;
}
- if (dev_id->driver_data == SFC_GUID)
- scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
- STORVSC_FC_MAX_TARGETS);
+ scsi_driver.can_queue = (max_outstanding_req_per_channel *
+ (max_sub_channels + 1));
+
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
if (!host)
@@ -1780,6 +1845,12 @@ static int storvsc_probe(struct hv_device *device,
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+ /*
+ * set the table size based on the info we got
+ * from the host.
+ */
+ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
@@ -1837,7 +1908,6 @@ static struct hv_driver storvsc_drv = {
static int __init storvsc_drv_init(void)
{
- u32 max_outstanding_req_per_channel;
/*
* Divide the ring buffer data size (which is 1 page less
@@ -1852,10 +1922,6 @@ static int __init storvsc_drv_init(void)
vmscsi_size_delta,
sizeof(u64)));
- if (max_outstanding_req_per_channel <
- STORVSC_MAX_IO_REQUESTS)
- return -EINVAL;
-
return vmbus_driver_register(&storvsc_drv);
}
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 2a906d1d34ba..22a42836d193 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -676,7 +676,6 @@ static struct platform_driver sun3_scsi_driver = {
.remove = __exit_p(sun3_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9217af9bf734..6652a8171de6 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -214,8 +214,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
struct ufs_qcom_host *host = hba->priv;
struct phy *phy = host->generic_phy;
int ret = 0;
- u8 major;
- u16 minor, step;
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
@@ -224,8 +222,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
- ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
- ufs_qcom_phy_save_controller_version(phy, major, minor, step);
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
@@ -698,16 +694,24 @@ out:
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
- u8 major;
- u16 minor, step;
+ struct ufs_qcom_host *host = hba->priv;
- ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+ if (host->hw_ver.major == 0x1)
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
- /*
- * TBD
- * here we should be advertising controller quirks according to
- * controller version.
- */
+ if (host->hw_ver.major >= 0x2) {
+ if (!ufs_qcom_cap_qunipro(host))
+ /* Legacy UniPro mode still need following quirks */
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+ }
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ if (host->hw_ver.major >= 0x2)
+ host->caps = UFS_QCOM_CAP_QUNIPRO;
}
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
@@ -929,6 +933,13 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_host_free;
+ ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
+ &host->hw_ver.minor, &host->hw_ver.step);
+
+ /* update phy revision information before calling phy_init() */
+ ufs_qcom_phy_save_controller_version(host->generic_phy,
+ host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
@@ -938,6 +949,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_disable_phy;
+ ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 9a6febd007df..db2c0a00e846 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -151,7 +151,23 @@ struct ufs_qcom_bus_vote {
struct device_attribute max_bus_bw;
};
+/* Host controller hardware version: major.minor.step */
+struct ufs_hw_version {
+ u16 step;
+ u16 minor;
+ u8 major;
+};
struct ufs_qcom_host {
+
+ /*
+ * Set this capability if the host controller supports the QUniPro mode
+ * and the driver wants the host controller to operate in QUniPro mode.
+ * Note: by default this capability is kept enabled if the host
+ * controller supports the QUniPro mode.
+ */
+ #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
+ u32 caps;
+
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_qcom_bus_vote bus_vote;
@@ -161,10 +177,20 @@ struct ufs_qcom_host {
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled;
+
+ struct ufs_hw_version hw_ver;
};
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
+{
+ if (host->caps & UFS_QCOM_CAP_QUNIPRO)
+ return true;
+ else
+ return false;
+}
+
#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2aa85e398f76..648a44675880 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -183,6 +183,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
@@ -972,6 +973,8 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2058,6 +2061,37 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
return ret;
}
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for the first call to
+ * this function.
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
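For clarity, a stand-alone sketch of the rate-limiting pattern that ufshcd_add_delay_before_dme_cmd() implements: enforce a minimum gap since the previous DME command before issuing the next one. The sketch uses plain POSIX timing calls rather than the kernel's ktime/usleep_range helpers, and updating the timestamp at the end of the helper is an assumption made for the example (the driver maintains last_dme_cmd_tstamp separately):

	#include <stdint.h>
	#include <time.h>
	#include <unistd.h>

	#define MIN_DELAY_US 1000	/* mirrors MIN_DELAY_BEFORE_DME_CMDS_US */

	static int64_t last_cmd_us;	/* 0 until the first command is issued */

	static int64_t now_us(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
	}

	/* Sleep just long enough that at least MIN_DELAY_US has elapsed since
	 * the previous command; sleep the full delay on the very first call. */
	static void delay_before_cmd(void)
	{
		int64_t sleep_us = MIN_DELAY_US;

		if (last_cmd_us) {
			int64_t delta = now_us() - last_cmd_us;

			if (delta >= MIN_DELAY_US)
				sleep_us = 0;	/* enough time has already passed */
			else
				sleep_us = MIN_DELAY_US - delta;
		}
		if (sleep_us)
			usleep(sleep_us);
		last_cmd_us = now_us();	/* assumption: updated here for the example */
	}

	int main(void)
	{
		delay_before_cmd();	/* first call waits the full minimum delay */
		delay_before_cmd();	/* second call waits only the remainder */
		return 0;
	}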
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
@@ -2157,6 +2191,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4a574aa45855..b47ff07698e8 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -366,6 +366,7 @@ struct ufs_init_prefetch {
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
* @dev_cmd: ufs device management command information
+ * @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
@@ -416,6 +417,13 @@ struct ufs_hba {
unsigned int irq;
bool is_irq_enabled;
+ /*
+ * A delay before each DME command is required as the UniPro
+ * layer has shown instabilities.
+ */
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0)
+
+ unsigned int quirks; /* Deviations from standard UFSHCI spec. */
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
@@ -446,6 +454,7 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
+ ktime_t last_dme_cmd_tstamp;
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 75b3603906c1..f0d22cdb51cd 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
bool "SSB Broadcom MIPS core driver"
depends on SSB && MIPS
select SSB_SERIAL
+ select SSB_SFLASH
help
Driver for the Sonics Silicon Backplane attached
Broadcom MIPS core.
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 1173a091b402..09428412139e 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 7b986f9f213f..f87efef42252 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -16,7 +16,7 @@
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 91442fab5725..c6c824356464 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t file_offset)
{
struct lu_env *env;
struct cl_io *io;
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
* size changing by concurrent truncates and writes.
* 1. Need inode mutex to operate transient pages.
*/
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
mutex_lock(&inode->i_mutex);
LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
size_t offs;
count = min_t(size_t, iov_iter_count(iter), size);
- if (rw == READ) {
+ if (iov_iter_rw(iter) == READ) {
if (file_offset >= i_size_read(inode))
break;
if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
if (likely(result > 0)) {
int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
- result = ll_direct_IO_26_seg(env, io, rw, inode,
- file->f_mapping,
- result, file_offset,
- pages, n);
- ll_free_user_pages(pages, n, rw==READ);
+ result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
+ inode, file->f_mapping,
+ result, file_offset, pages,
+ n);
+ ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
}
if (unlikely(result <= 0)) {
/* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
}
out:
LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
struct lov_stripe_md *lsm;
lsm = ccc_inode_lsm_get(inode);
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b7a7854d3f7e..5b9ac1f6d6f0 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -274,6 +274,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/* Build the PKO command */
pko_command.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ pko_command.s.le = 1;
+#endif
pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
pko_command.s.segs = 1;
pko_command.s.total_bytes = skb->len;
@@ -410,7 +413,7 @@ dont_put_skbuff_in_hw:
/* Check if we can use the hardware checksumming */
if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
- ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
+ ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == htons(1 << 14)))
&& ((ip_hdr(skb)->protocol == IPPROTO_TCP)
|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f539d82f2f11..fbbe866485c7 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -170,6 +170,16 @@ static void cvm_oct_configure_common_hw(void)
cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+#ifdef __LITTLE_ENDIAN
+ {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.pkt_lend = 1;
+ ipd_ctl_status.s.wqe_lend = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ }
+#endif
+
if (USE_RED)
cvmx_helper_setup_red(num_packet_buffers / 4,
num_packet_buffers / 8);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b24aa010f68c..c01f45095877 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -419,4 +419,51 @@ config DA_CONSOLE
help
This enables a console on a Dash channel.
+config MIPS_EJTAG_FDC_TTY
+ bool "MIPS EJTAG Fast Debug Channel TTY"
+ depends on MIPS_CDMM
+ help
+ This enables a TTY and console on the MIPS EJTAG Fast Debug Channels,
+ if they are present. This can be useful when working with an EJTAG
+ probe which supports it, to get console output and a login prompt via
+ EJTAG without needing to connect a serial cable.
+
+ TTY devices are named e.g. ttyFDC3c2 (for FDC channel 2 of the FDC on
+ CPU3).
+
+ The console can be enabled with console=fdc1 (for FDC channel 1 on all
+ CPUs). Do not use the console unless there is a debug probe attached
+ to drain the FDC TX FIFO.
+
+ If unsure, say N.
+
+config MIPS_EJTAG_FDC_EARLYCON
+ bool "Early FDC console"
+ depends on MIPS_EJTAG_FDC_TTY
+ help
+ This registers a console on FDC channel 1 very early during boot (from
+ MIPS arch code). This is useful for bring-up and debugging early boot
+ issues.
+
+ Do not enable unless there is a debug probe attached to drain the FDC
+ TX FIFO.
+
+ If unsure, say N.
+
+config MIPS_EJTAG_FDC_KGDB
+ bool "Use KGDB over an FDC channel"
+ depends on MIPS_EJTAG_FDC_TTY && KGDB
+ default y
+ help
+ This enables the use of KGDB over an FDC channel, allowing KGDB to be
+ used remotely or when a serial port isn't available.
+
+config MIPS_EJTAG_FDC_KGDB_CHAN
+ int "KGDB FDC channel"
+ depends on MIPS_EJTAG_FDC_KGDB
+ range 2 15
+ default 3
+ help
+ FDC channel number to use for KGDB.
+
endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 58ad1c05b7f8..5817e2397463 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
obj-$(CONFIG_DA_TTY) += metag_da.o
+obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
obj-y += ipwireless/
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
new file mode 100644
index 000000000000..04d9e23d1ee1
--- /dev/null
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -0,0 +1,1303 @@
+/*
+ * TTY driver for MIPS EJTAG Fast Debug Channels.
+ *
+ * Copyright (C) 2007-2015 Imagination Technologies Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for more
+ * details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+
+#include <asm/cdmm.h>
+#include <asm/irq.h>
+
+/* Register offsets */
+#define REG_FDACSR 0x00 /* FDC Access Control and Status Register */
+#define REG_FDCFG 0x08 /* FDC Configuration Register */
+#define REG_FDSTAT 0x10 /* FDC Status Register */
+#define REG_FDRX 0x18 /* FDC Receive Register */
+#define REG_FDTX(N) (0x20+0x8*(N)) /* FDC Transmit Register n (0..15) */
+
+/* Register fields */
+
+#define REG_FDCFG_TXINTTHRES_SHIFT 18
+#define REG_FDCFG_TXINTTHRES (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_DISABLED (0x0 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_EMPTY (0x1 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_NOTFULL (0x2 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_NEAREMPTY (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_SHIFT 16
+#define REG_FDCFG_RXINTTHRES (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_DISABLED (0x0 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_FULL (0x1 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_NOTEMPTY (0x2 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_NEARFULL (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_TXFIFOSIZE_SHIFT 8
+#define REG_FDCFG_TXFIFOSIZE (0xff << REG_FDCFG_TXFIFOSIZE_SHIFT)
+#define REG_FDCFG_RXFIFOSIZE_SHIFT 0
+#define REG_FDCFG_RXFIFOSIZE (0xff << REG_FDCFG_RXFIFOSIZE_SHIFT)
+
+#define REG_FDSTAT_TXCOUNT_SHIFT 24
+#define REG_FDSTAT_TXCOUNT (0xff << REG_FDSTAT_TXCOUNT_SHIFT)
+#define REG_FDSTAT_RXCOUNT_SHIFT 16
+#define REG_FDSTAT_RXCOUNT (0xff << REG_FDSTAT_RXCOUNT_SHIFT)
+#define REG_FDSTAT_RXCHAN_SHIFT 4
+#define REG_FDSTAT_RXCHAN (0xf << REG_FDSTAT_RXCHAN_SHIFT)
+#define REG_FDSTAT_RXE BIT(3) /* Rx Empty */
+#define REG_FDSTAT_RXF BIT(2) /* Rx Full */
+#define REG_FDSTAT_TXE BIT(1) /* Tx Empty */
+#define REG_FDSTAT_TXF BIT(0) /* Tx Full */
+
+/* Default channel for the early console */
+#define CONSOLE_CHANNEL 1
+
+#define NUM_TTY_CHANNELS 16
+
+#define RX_BUF_SIZE 1024
+
+/*
+ * When the IRQ is unavailable, the FDC state must be polled for incoming data
+ * and for space becoming available in the TX FIFO.
+ */
+#define FDC_TTY_POLL (HZ / 50)
+
+struct mips_ejtag_fdc_tty;
+
+/**
+ * struct mips_ejtag_fdc_tty_port - Wrapper struct for FDC tty_port.
+ * @port: TTY port data
+ * @driver: TTY driver.
+ * @rx_lock: Lock for rx_buf.
+ * This protects between the hard interrupt and user
+ * context. It's also held during read SWITCH operations.
+ * @rx_buf: Read buffer.
+ * @xmit_lock: Lock for xmit_*, and port.xmit_buf.
+ * This protects between user context and kernel thread.
+ * It is used from chars_in_buffer()/write_room() TTY
+ * callbacks which are used during wait operations, so a
+ * mutex is unsuitable.
+ * @xmit_cnt: Size of xmit buffer contents.
+ * @xmit_head: Head of xmit buffer where data is written.
+ * @xmit_tail: Tail of xmit buffer where data is read.
+ * @xmit_empty: Completion for xmit buffer being empty.
+ */
+struct mips_ejtag_fdc_tty_port {
+ struct tty_port port;
+ struct mips_ejtag_fdc_tty *driver;
+ raw_spinlock_t rx_lock;
+ void *rx_buf;
+ spinlock_t xmit_lock;
+ unsigned int xmit_cnt;
+ unsigned int xmit_head;
+ unsigned int xmit_tail;
+ struct completion xmit_empty;
+};
+
+/**
+ * struct mips_ejtag_fdc_tty - Driver data for FDC as a whole.
+ * @dev: FDC device (for dev_*() logging).
+ * @driver: TTY driver.
+ * @cpu: CPU number for this FDC.
+ * @fdc_name: FDC name (not the base of the channel names).
+ * @driver_name: Base of driver name.
+ * @ports: Per-channel data.
+ * @waitqueue: Wait queue for waiting for TX data, or for space in TX
+ * FIFO.
+ * @lock: Lock to protect FDCFG (interrupt enable).
+ * @thread: KThread for writing out data to FDC.
+ * @reg: FDC registers.
+ * @tx_fifo: TX FIFO size.
+ * @xmit_size: Size of each port's xmit buffer.
+ * @xmit_total: Total number of bytes (from all ports) to transmit.
+ * @xmit_next: Next port number to transmit from (round robin).
+ * @xmit_full: Indicates TX FIFO is full, we're waiting for space.
+ * @irq: IRQ number (negative if no IRQ).
+ * @removing: Indicates the device is being removed and @poll_timer
+ * should not be restarted.
+ * @poll_timer: Timer for polling for interrupt events when @irq < 0.
+ * @sysrq_pressed: Whether the magic sysrq key combination has been
+ * detected. See mips_ejtag_fdc_handle().
+ */
+struct mips_ejtag_fdc_tty {
+ struct device *dev;
+ struct tty_driver *driver;
+ unsigned int cpu;
+ char fdc_name[16];
+ char driver_name[16];
+ struct mips_ejtag_fdc_tty_port ports[NUM_TTY_CHANNELS];
+ wait_queue_head_t waitqueue;
+ raw_spinlock_t lock;
+ struct task_struct *thread;
+
+ void __iomem *reg;
+ u8 tx_fifo;
+
+ unsigned int xmit_size;
+ atomic_t xmit_total;
+ unsigned int xmit_next;
+ bool xmit_full;
+
+ int irq;
+ bool removing;
+ struct timer_list poll_timer;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ bool sysrq_pressed;
+#endif
+};
+
+/* Hardware access */
+
+static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
+ unsigned int offs, unsigned int data)
+{
+ iowrite32(data, priv->reg + offs);
+}
+
+static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
+ unsigned int offs)
+{
+ return ioread32(priv->reg + offs);
+}
+
+/* Encoding of byte stream in FDC words */
+
+/**
+ * struct fdc_word - FDC word encoding some number of bytes of data.
+ * @word: Raw FDC word.
+ * @bytes: Number of bytes encoded by @word.
+ */
+struct fdc_word {
+ u32 word;
+ unsigned int bytes;
+};
+
+/*
+ * This is a compact encoding which allows every 1 byte, 2 byte, and 3 byte
+ * sequence to be encoded in a single word, while allowing the majority of 4
+ * byte sequences (including all ASCII and common binary data) to be encoded in
+ * a single word too.
+ * _______________________ _____________
+ * | FDC Word | |
+ * |31-24|23-16|15-8 | 7-0 | Bytes |
+ * |_____|_____|_____|_____|_____________|
+ * | | | | | |
+ * |0x80 |0x80 |0x80 | WW | WW |
+ * |0x81 |0x81 | XX | WW | WW XX |
+ * |0x82 | YY | XX | WW | WW XX YY |
+ * | ZZ | YY | XX | WW | WW XX YY ZZ |
+ * |_____|_____|_____|_____|_____________|
+ *
+ * Note that the 4-byte encoding can only be used where none of the other 3
+ * encodings match, otherwise it must fall back to the 3 byte encoding.
+ */
+
+/* ranges >= 1 && sizes[0] >= 1 */
+static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs,
+ unsigned int *sizes,
+ unsigned int ranges)
+{
+ struct fdc_word word = { 0, 0 };
+ const char **ptrs_end = ptrs + ranges;
+
+ for (; ptrs < ptrs_end; ++ptrs) {
+ const char *ptr = *(ptrs++);
+ const char *end = ptr + *(sizes++);
+
+ for (; ptr < end; ++ptr) {
+ word.word |= (u8)*ptr << (8*word.bytes);
+ ++word.bytes;
+ if (word.bytes == 4)
+ goto done;
+ }
+ }
+done:
+ /* Choose the appropriate encoding */
+ switch (word.bytes) {
+ case 4:
+ /* 4 byte encoding, but don't match the 1-3 byte encodings */
+ if ((word.word >> 8) != 0x808080 &&
+ (word.word >> 16) != 0x8181 &&
+ (word.word >> 24) != 0x82)
+ break;
+ /* Fall back to a 3 byte encoding */
+ word.bytes = 3;
+ word.word &= 0x00ffffff;
+ case 3:
+ /* 3 byte encoding */
+ word.word |= 0x82000000;
+ break;
+ case 2:
+ /* 2 byte encoding */
+ word.word |= 0x81810000;
+ break;
+ case 1:
+ /* 1 byte encoding */
+ word.word |= 0x80808000;
+ break;
+ }
+ return word;
+}
+
+static unsigned int mips_ejtag_fdc_decode(u32 word, char *buf)
+{
+ buf[0] = (u8)word;
+ word >>= 8;
+ if (word == 0x808080)
+ return 1;
+ buf[1] = (u8)word;
+ word >>= 8;
+ if (word == 0x8181)
+ return 2;
+ buf[2] = (u8)word;
+ word >>= 8;
+ if (word == 0x82)
+ return 3;
+ buf[3] = (u8)word;
+ return 4;
+}
+
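A self-contained sketch of the packing scheme described in the table above, simplified to encode from a single flat buffer instead of the driver's list of ranges. It uses the same 0x80/0x81/0x82 marker bytes and the same fall-back from the 4-byte to the 3-byte form, and can be used to sanity-check the encode/decode round trip on a host machine:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* Pack up to 4 bytes into one FDC word; fall back to the 3-byte form
	 * when a raw 4-byte word would collide with the 1-3 byte markers. */
	static uint32_t fdc_encode(const char *buf, unsigned int len, unsigned int *used)
	{
		uint32_t word = 0;
		unsigned int n = len > 4 ? 4 : len, i;

		for (i = 0; i < n; ++i)
			word |= (uint32_t)(uint8_t)buf[i] << (8 * i);

		if (n == 4 && ((word >> 8) == 0x808080 ||
			       (word >> 16) == 0x8181 ||
			       (word >> 24) == 0x82)) {
			n = 3;
			word &= 0x00ffffff;
		}
		switch (n) {
		case 3: word |= 0x82000000; break;
		case 2: word |= 0x81810000; break;
		case 1: word |= 0x80808000; break;
		}
		*used = n;
		return word;
	}

	/* Mirror of mips_ejtag_fdc_decode(): recover 1-4 bytes from a word. */
	static unsigned int fdc_decode(uint32_t word, char *buf)
	{
		buf[0] = (uint8_t)word; word >>= 8;
		if (word == 0x808080) return 1;
		buf[1] = (uint8_t)word; word >>= 8;
		if (word == 0x8181)   return 2;
		buf[2] = (uint8_t)word; word >>= 8;
		if (word == 0x82)     return 3;
		buf[3] = (uint8_t)word;
		return 4;
	}

	int main(void)
	{
		char out[4];
		unsigned int used;
		uint32_t w = fdc_encode("hi\n", 3, &used);

		assert(used == 3 && fdc_decode(w, out) == 3);
		assert(memcmp(out, "hi\n", 3) == 0);
		return 0;
	}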
+/* Console operations */
+
+/**
+ * struct mips_ejtag_fdc_console - Wrapper struct for FDC consoles.
+ * @cons: Console object.
+ * @tty_drv: TTY driver associated with this console.
+ * @lock: Lock to protect concurrent access to other fields.
+ * This is raw because it may be used very early.
+ * @initialised: Whether the console is initialised.
+ * @regs: Registers base address for each CPU.
+ */
+struct mips_ejtag_fdc_console {
+ struct console cons;
+ struct tty_driver *tty_drv;
+ raw_spinlock_t lock;
+ bool initialised;
+ void __iomem *regs[NR_CPUS];
+};
+
+/* Low level console write shared by early console and normal console */
+static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
+ unsigned int count)
+{
+ struct mips_ejtag_fdc_console *cons =
+ container_of(c, struct mips_ejtag_fdc_console, cons);
+ void __iomem *regs;
+ struct fdc_word word;
+ unsigned long flags;
+ unsigned int i, buf_len, cpu;
+ bool done_cr = false;
+ char buf[4];
+ const char *buf_ptr = buf;
+ /* Number of bytes of input data encoded up to each byte in buf */
+ u8 inc[4];
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ regs = cons->regs[cpu];
+ /* First console output on this CPU? */
+ if (!regs) {
+ regs = mips_cdmm_early_probe(0xfd);
+ cons->regs[cpu] = regs;
+ }
+ /* Already tried and failed to find FDC on this CPU? */
+ if (IS_ERR(regs))
+ goto out;
+ while (count) {
+ /*
+ * Copy the next few characters to a buffer so we can inject
+ * carriage returns before newlines.
+ */
+ for (buf_len = 0, i = 0; buf_len < 4 && i < count; ++buf_len) {
+ if (s[i] == '\n' && !done_cr) {
+ buf[buf_len] = '\r';
+ done_cr = true;
+ } else {
+ buf[buf_len] = s[i];
+ done_cr = false;
+ ++i;
+ }
+ inc[buf_len] = i;
+ }
+ word = mips_ejtag_fdc_encode(&buf_ptr, &buf_len, 1);
+ count -= inc[word.bytes - 1];
+ s += inc[word.bytes - 1];
+
+ /* Busy wait until there's space in fifo */
+ while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+ ;
+ iowrite32(word.word, regs + REG_FDTX(c->index));
+ }
+out:
+ local_irq_restore(flags);
+}
+
+static struct tty_driver *mips_ejtag_fdc_console_device(struct console *c,
+ int *index)
+{
+ struct mips_ejtag_fdc_console *cons =
+ container_of(c, struct mips_ejtag_fdc_console, cons);
+
+ *index = c->index;
+ return cons->tty_drv;
+}
+
+/* Initialise an FDC console (early or normal) */
+static int __init mips_ejtag_fdc_console_init(struct mips_ejtag_fdc_console *c)
+{
+ void __iomem *regs;
+ unsigned long flags;
+ int ret = 0;
+
+ raw_spin_lock_irqsave(&c->lock, flags);
+ /* Don't init twice */
+ if (c->initialised)
+ goto out;
+ /* Look for the FDC device */
+ regs = mips_cdmm_early_probe(0xfd);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto out;
+ }
+
+ c->initialised = true;
+ c->regs[smp_processor_id()] = regs;
+ register_console(&c->cons);
+out:
+ raw_spin_unlock_irqrestore(&c->lock, flags);
+ return ret;
+}
+
+static struct mips_ejtag_fdc_console mips_ejtag_fdc_con = {
+ .cons = {
+ .name = "fdc",
+ .write = mips_ejtag_fdc_console_write,
+ .device = mips_ejtag_fdc_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ },
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_con.lock),
+};
+
+/* TTY RX/TX operations */
+
+/**
+ * mips_ejtag_fdc_put_chan() - Write out a block of channel data.
+ * @priv: Pointer to driver private data.
+ * @chan: Channel number.
+ *
+ * Write a single block of data out to the debug adapter. If the circular buffer
+ * is wrapped then only the first block is written.
+ *
+ * Returns: The number of bytes that were written.
+ */
+static unsigned int mips_ejtag_fdc_put_chan(struct mips_ejtag_fdc_tty *priv,
+ unsigned int chan)
+{
+ struct mips_ejtag_fdc_tty_port *dport;
+ struct tty_struct *tty;
+ const char *ptrs[2];
+ unsigned int sizes[2] = { 0 };
+ struct fdc_word word = { .bytes = 0 };
+ unsigned long flags;
+
+ dport = &priv->ports[chan];
+ spin_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ ptrs[0] = dport->port.xmit_buf + dport->xmit_tail;
+ sizes[0] = min_t(unsigned int,
+ priv->xmit_size - dport->xmit_tail,
+ dport->xmit_cnt);
+ ptrs[1] = dport->port.xmit_buf;
+ sizes[1] = dport->xmit_cnt - sizes[0];
+ word = mips_ejtag_fdc_encode(ptrs, sizes, 1 + !!sizes[1]);
+
+ dev_dbg(priv->dev, "%s%u: out %08x: \"%*pE%*pE\"\n",
+ priv->driver_name, chan, word.word,
+ min_t(int, word.bytes, sizes[0]), ptrs[0],
+ max_t(int, 0, word.bytes - sizes[0]), ptrs[1]);
+
+ local_irq_save(flags);
+ /* Maybe we raced with the console and TX FIFO is full */
+ if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF)
+ word.bytes = 0;
+ else
+ mips_ejtag_fdc_write(priv, REG_FDTX(chan), word.word);
+ local_irq_restore(flags);
+
+ dport->xmit_cnt -= word.bytes;
+ if (!dport->xmit_cnt) {
+ /* Reset pointers to avoid wraps */
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ } else {
+ dport->xmit_tail += word.bytes;
+ if (dport->xmit_tail >= priv->xmit_size)
+ dport->xmit_tail -= priv->xmit_size;
+ }
+ atomic_sub(word.bytes, &priv->xmit_total);
+ }
+ spin_unlock(&dport->xmit_lock);
+
+ /* If we've made more data available, wake up tty */
+ if (sizes[0] && word.bytes) {
+ tty = tty_port_tty_get(&dport->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ }
+
+ return word.bytes;
+}
+
+/**
+ * mips_ejtag_fdc_put() - Kernel thread to write out channel data to FDC.
+ * @arg: Driver pointer.
+ *
+ * This kernel thread runs while @priv->xmit_total != 0, and round robins the
+ * channels writing out blocks of buffered data to the FDC TX FIFO.
+ */
+static int mips_ejtag_fdc_put(void *arg)
+{
+ struct mips_ejtag_fdc_tty *priv = arg;
+ struct mips_ejtag_fdc_tty_port *dport;
+ unsigned int ret;
+ u32 cfg;
+
+ __set_current_state(TASK_RUNNING);
+ while (!kthread_should_stop()) {
+ /* Wait for data to actually write */
+ wait_event_interruptible(priv->waitqueue,
+ atomic_read(&priv->xmit_total) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Wait for TX FIFO space to write data */
+ raw_spin_lock_irq(&priv->lock);
+ if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF) {
+ priv->xmit_full = true;
+ if (priv->irq >= 0) {
+ /* Enable TX interrupt */
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_TXINTTHRES;
+ cfg |= REG_FDCFG_TXINTTHRES_NOTFULL;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ }
+ }
+ raw_spin_unlock_irq(&priv->lock);
+ wait_event_interruptible(priv->waitqueue,
+ !(mips_ejtag_fdc_read(priv, REG_FDSTAT)
+ & REG_FDSTAT_TXF) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Find next channel with data to output */
+ for (;;) {
+ dport = &priv->ports[priv->xmit_next];
+ spin_lock(&dport->xmit_lock);
+ ret = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+ if (ret)
+ break;
+ /* Round robin */
+ ++priv->xmit_next;
+ if (priv->xmit_next >= NUM_TTY_CHANNELS)
+ priv->xmit_next = 0;
+ }
+
+ /* Try writing data to the chosen channel */
+ ret = mips_ejtag_fdc_put_chan(priv, priv->xmit_next);
+
+ /*
+ * If anything was output, move on to the next channel so as not
+ * to starve other channels.
+ */
+ if (ret) {
+ ++priv->xmit_next;
+ if (priv->xmit_next >= NUM_TTY_CHANNELS)
+ priv->xmit_next = 0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mips_ejtag_fdc_handle() - Handle FDC events.
+ * @priv: Pointer to driver private data.
+ *
+ * Handle FDC events, such as new incoming data which needs draining out of the
+ * RX FIFO and feeding into the appropriate TTY ports, and space becoming
+ * available in the TX FIFO which would allow more data to be written out.
+ */
+static void mips_ejtag_fdc_handle(struct mips_ejtag_fdc_tty *priv)
+{
+ struct mips_ejtag_fdc_tty_port *dport;
+ unsigned int stat, channel, data, cfg, i, flipped;
+ int len;
+ char buf[4];
+
+ for (;;) {
+ /* Find which channel the next FDC word is destined for */
+ stat = mips_ejtag_fdc_read(priv, REG_FDSTAT);
+ if (stat & REG_FDSTAT_RXE)
+ break;
+ channel = (stat & REG_FDSTAT_RXCHAN) >> REG_FDSTAT_RXCHAN_SHIFT;
+ dport = &priv->ports[channel];
+
+ /* Read out the FDC word, decode it, and pass to tty layer */
+ raw_spin_lock(&dport->rx_lock);
+ data = mips_ejtag_fdc_read(priv, REG_FDRX);
+
+ len = mips_ejtag_fdc_decode(data, buf);
+ dev_dbg(priv->dev, "%s%u: in %08x: \"%*pE\"\n",
+ priv->driver_name, channel, data, len, buf);
+
+ flipped = 0;
+ for (i = 0; i < len; ++i) {
+#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
+ /* Support just Ctrl+C with KGDB channel */
+ if (channel == CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN) {
+ if (buf[i] == '\x03') { /* ^C */
+ handle_sysrq('g');
+ continue;
+ }
+ }
+#endif
+ /* Support Ctrl+O for console channel */
+ if (channel == mips_ejtag_fdc_con.cons.index) {
+ if (buf[i] == '\x0f') { /* ^O */
+ priv->sysrq_pressed =
+ !priv->sysrq_pressed;
+ if (priv->sysrq_pressed)
+ continue;
+ } else if (priv->sysrq_pressed) {
+ handle_sysrq(buf[i]);
+ priv->sysrq_pressed = false;
+ continue;
+ }
+ }
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+ /* Check the port isn't being shut down */
+ if (!dport->rx_buf)
+ continue;
+
+ flipped += tty_insert_flip_char(&dport->port, buf[i],
+ TTY_NORMAL);
+ }
+ if (flipped)
+ tty_flip_buffer_push(&dport->port);
+
+ raw_spin_unlock(&dport->rx_lock);
+ }
+
+ /* If TX FIFO no longer full we may be able to write more data */
+ raw_spin_lock(&priv->lock);
+ if (priv->xmit_full && !(stat & REG_FDSTAT_TXF)) {
+ priv->xmit_full = false;
+
+ /* Disable TX interrupt */
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_TXINTTHRES;
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+
+ /* Wake the kthread so it can try writing more data */
+ wake_up_interruptible(&priv->waitqueue);
+ }
+ raw_spin_unlock(&priv->lock);
+}
+
+/**
+ * mips_ejtag_fdc_isr() - Interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Pointer to driver private data.
+ *
+ * This is the interrupt handler, used when interrupts are enabled.
+ *
+ * It simply triggers the common FDC handler code.
+ *
+ * Returns: IRQ_HANDLED if an FDC interrupt was pending.
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
+{
+ struct mips_ejtag_fdc_tty *priv = dev_id;
+
+ /*
+ * We're not using proper per-cpu IRQs, so we must be careful not to
+ * handle IRQs on CPUs we're not interested in.
+ *
+ * Ideally proper per-cpu IRQ handlers could be used, but that doesn't
+ * fit well with the whole sharing of the main CPU IRQ lines. When we
+ * have something with a GIC that routes the FDC IRQs (i.e. no sharing
+ * between handlers) then support could be added more easily.
+ */
+ if (smp_processor_id() != priv->cpu)
+ return IRQ_NONE;
+
+ /* If no FDC interrupt pending, it wasn't for us */
+ if (!(read_c0_cause() & CAUSEF_FDCI))
+ return IRQ_NONE;
+
+ mips_ejtag_fdc_handle(priv);
+ return IRQ_HANDLED;
+}
+
+/**
+ * mips_ejtag_fdc_tty_timer() - Poll FDC for incoming data.
+ * @opaque: Pointer to driver private data.
+ *
+ * This is the timer handler for when interrupts are disabled and polling the
+ * FDC state is required.
+ *
+ * It simply triggers the common FDC handler code and arranges for further
+ * polling.
+ */
+static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
+{
+ struct mips_ejtag_fdc_tty *priv = (void *)opaque;
+
+ mips_ejtag_fdc_handle(priv);
+ if (!priv->removing)
+ mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+}
+
+/* TTY Port operations */
+
+static int mips_ejtag_fdc_tty_port_activate(struct tty_port *port,
+ struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport =
+ container_of(port, struct mips_ejtag_fdc_tty_port, port);
+ void *rx_buf;
+
+ /* Allocate the buffer we use for writing data */
+ if (tty_port_alloc_xmit_buf(port) < 0)
+ goto err;
+
+ /* Allocate the buffer we use for reading data */
+ rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_xmit;
+
+ raw_spin_lock_irq(&dport->rx_lock);
+ dport->rx_buf = rx_buf;
+ raw_spin_unlock_irq(&dport->rx_lock);
+
+ return 0;
+err_free_xmit:
+ tty_port_free_xmit_buf(port);
+err:
+ return -ENOMEM;
+}
+
+static void mips_ejtag_fdc_tty_port_shutdown(struct tty_port *port)
+{
+ struct mips_ejtag_fdc_tty_port *dport =
+ container_of(port, struct mips_ejtag_fdc_tty_port, port);
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+ void *rx_buf;
+ unsigned int count;
+
+ spin_lock(&dport->xmit_lock);
+ count = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+ if (count) {
+ /*
+ * There's still data to write out, so wake and wait for the
+ * writer thread to drain the buffer.
+ */
+ wake_up_interruptible(&priv->waitqueue);
+ wait_for_completion(&dport->xmit_empty);
+ }
+
+ /* Null the read buffer (timer could still be running!) */
+ raw_spin_lock_irq(&dport->rx_lock);
+ rx_buf = dport->rx_buf;
+ dport->rx_buf = NULL;
+ raw_spin_unlock_irq(&dport->rx_lock);
+ /* Free the read buffer */
+ kfree(rx_buf);
+
+ /* Free the write buffer */
+ tty_port_free_xmit_buf(port);
+}
+
+static const struct tty_port_operations mips_ejtag_fdc_tty_port_ops = {
+ .activate = mips_ejtag_fdc_tty_port_activate,
+ .shutdown = mips_ejtag_fdc_tty_port_shutdown,
+};
+
+/* TTY operations */
+
+static int mips_ejtag_fdc_tty_install(struct tty_driver *driver,
+ struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty *priv = driver->driver_state;
+
+ tty->driver_data = &priv->ports[tty->index];
+ return tty_port_install(&priv->ports[tty->index].port, driver, tty);
+}
+
+static int mips_ejtag_fdc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(tty->port, tty, filp);
+}
+
+static void mips_ejtag_fdc_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_close(tty->port, tty, filp);
+}
+
+static void mips_ejtag_fdc_tty_hangup(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+
+ /* Drop any data in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ atomic_sub(dport->xmit_cnt, &priv->xmit_total);
+ dport->xmit_cnt = 0;
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ }
+ spin_unlock(&dport->xmit_lock);
+
+ tty_port_hangup(tty->port);
+}
+
+static int mips_ejtag_fdc_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int total)
+{
+ int count, block;
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+
+ /*
+ * Write to output buffer.
+ *
+ * We write the buffer asynchronously because the channels are per-CPU:
+ * a synchronous write would go out on the channel of whichever CPU we
+ * happen to be running on.
+ *
+ * What we actually want is for all input and output to be done on
+ * one CPU.
+ */
+ spin_lock(&dport->xmit_lock);
+ /* Work out how many bytes we can write to the xmit buffer */
+ total = min(total, (int)(priv->xmit_size - dport->xmit_cnt));
+ atomic_add(total, &priv->xmit_total);
+ dport->xmit_cnt += total;
+ /* Write the actual bytes (may need splitting if it wraps) */
+ for (count = total; count; count -= block) {
+ block = min(count, (int)(priv->xmit_size - dport->xmit_head));
+ memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
+ dport->xmit_head += block;
+ if (dport->xmit_head >= priv->xmit_size)
+ dport->xmit_head -= priv->xmit_size;
+ buf += block;
+ }
+ count = dport->xmit_cnt;
+ /* Xmit buffer no longer empty? */
+ if (count)
+ reinit_completion(&dport->xmit_empty);
+ spin_unlock(&dport->xmit_lock);
+
+ /* Wake up the kthread */
+ if (total)
+ wake_up_interruptible(&priv->waitqueue);
+ return total;
+}
+
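A minimal sketch of the head/count ring-buffer bookkeeping used by the write path above: copy as much as fits, splitting the copy when it wraps around the end of the buffer. The buffer size and test data are placeholders, and the locking, completion and kthread wake-up done by the driver are deliberately omitted:

	#include <string.h>

	#define XMIT_SIZE 64	/* placeholder; the driver sizes this from the TX FIFO */

	struct ring {
		char buf[XMIT_SIZE];
		unsigned int cnt, head;
	};

	/* Copy as much of src as fits, wrapping at the end of the buffer. */
	static unsigned int ring_write(struct ring *r, const char *src, unsigned int len)
	{
		unsigned int total = len, block;

		if (total > XMIT_SIZE - r->cnt)
			total = XMIT_SIZE - r->cnt;
		r->cnt += total;
		for (len = total; len; len -= block) {
			block = len;
			if (block > XMIT_SIZE - r->head)
				block = XMIT_SIZE - r->head;
			memcpy(r->buf + r->head, src, block);
			r->head += block;
			if (r->head >= XMIT_SIZE)
				r->head -= XMIT_SIZE;
			src += block;
		}
		return total;
	}

	int main(void)
	{
		struct ring r = { .cnt = 0, .head = 0 };

		return ring_write(&r, "hello", 5) == 5 ? 0 : 1;
	}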
+static int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+ int room;
+
+ /* Report the space in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ room = priv->xmit_size - dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+
+ return room;
+}
+
+static int mips_ejtag_fdc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ int chars;
+
+ /* Report the number of bytes in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ chars = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+
+ return chars;
+}
+
+static const struct tty_operations mips_ejtag_fdc_tty_ops = {
+ .install = mips_ejtag_fdc_tty_install,
+ .open = mips_ejtag_fdc_tty_open,
+ .close = mips_ejtag_fdc_tty_close,
+ .hangup = mips_ejtag_fdc_tty_hangup,
+ .write = mips_ejtag_fdc_tty_write,
+ .write_room = mips_ejtag_fdc_tty_write_room,
+ .chars_in_buffer = mips_ejtag_fdc_tty_chars_in_buffer,
+};
+
+static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
+{
+ int ret, nport;
+ struct mips_ejtag_fdc_tty_port *dport;
+ struct mips_ejtag_fdc_tty *priv;
+ struct tty_driver *driver;
+ unsigned int cfg, tx_fifo;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->cpu = dev->cpu;
+ priv->dev = &dev->dev;
+ mips_cdmm_set_drvdata(dev, priv);
+ atomic_set(&priv->xmit_total, 0);
+ raw_spin_lock_init(&priv->lock);
+
+ priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+ resource_size(&dev->res));
+ if (!priv->reg) {
+ dev_err(priv->dev, "ioremap failed for resource %pR\n",
+ &dev->res);
+ return -ENOMEM;
+ }
+
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ tx_fifo = (cfg & REG_FDCFG_TXFIFOSIZE) >> REG_FDCFG_TXFIFOSIZE_SHIFT;
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+
+ /* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+
+ driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+ priv->driver = driver;
+
+ driver->driver_name = "ejtag_fdc";
+ snprintf(priv->fdc_name, sizeof(priv->fdc_name), "ttyFDC%u", dev->cpu);
+ snprintf(priv->driver_name, sizeof(priv->driver_name), "%sc",
+ priv->fdc_name);
+ driver->name = priv->driver_name;
+ driver->major = 0; /* Auto-allocate */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SERIAL;
+ driver->subtype = SERIAL_TYPE_NORMAL;
+ driver->init_termios = tty_std_termios;
+ driver->init_termios.c_cflag |= CLOCAL;
+ driver->driver_state = priv;
+
+ tty_set_operations(driver, &mips_ejtag_fdc_tty_ops);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ dport->driver = priv;
+ tty_port_init(&dport->port);
+ dport->port.ops = &mips_ejtag_fdc_tty_port_ops;
+ raw_spin_lock_init(&dport->rx_lock);
+ spin_lock_init(&dport->xmit_lock);
+ /* The xmit buffer starts empty, i.e. completely written */
+ init_completion(&dport->xmit_empty);
+ complete(&dport->xmit_empty);
+ }
+
+ /* Set up the console */
+ mips_ejtag_fdc_con.regs[dev->cpu] = priv->reg;
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = driver;
+
+ init_waitqueue_head(&priv->waitqueue);
+ priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
+ goto err_destroy_ports;
+ }
+ /*
+ * Bind the writer thread to the right CPU so it can't migrate.
+ * The channels are per-CPU and we want all channel I/O to be on a
+ * single predictable CPU.
+ */
+ kthread_bind(priv->thread, dev->cpu);
+ wake_up_process(priv->thread);
+
+ /* Look for an FDC IRQ */
+ priv->irq = -1;
+ if (get_c0_fdc_int)
+ priv->irq = get_c0_fdc_int();
+
+ /* Try requesting the IRQ */
+ if (priv->irq >= 0) {
+ /*
+ * IRQF_SHARED, IRQF_NO_SUSPEND: The FDC IRQ may be shared with
+ * other local interrupts such as the timer which sets
+ * IRQF_TIMER (including IRQF_NO_SUSPEND).
+ *
+ * IRQF_NO_THREAD: The FDC IRQ isn't individually maskable so it
+ * cannot be deferred and handled by a thread on RT kernels. For
+ * this reason any spinlocks used from the ISR are raw.
+ */
+ ret = devm_request_irq(priv->dev, priv->irq, mips_ejtag_fdc_isr,
+ IRQF_PERCPU | IRQF_SHARED |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND,
+ priv->fdc_name, priv);
+ if (ret)
+ priv->irq = -1;
+ }
+ if (priv->irq >= 0) {
+ /* IRQ is usable, enable RX interrupt */
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_RXINTTHRES;
+ cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ /* If we didn't get a usable IRQ, poll instead */
+ setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ (unsigned long)priv);
+ priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
+ /*
+ * Always attach the timer to the right CPU. The channels are
+ * per-CPU so all polling should be from a single CPU.
+ */
+ add_timer_on(&priv->poll_timer, dev->cpu);
+
+ dev_info(priv->dev, "No usable IRQ, polling enabled\n");
+ }
+
+ ret = tty_register_driver(driver);
+ if (ret < 0) {
+ dev_err(priv->dev, "Couldn't install tty driver (%d)\n", ret);
+ goto err_stop_irq;
+ }
+
+ return 0;
+
+err_stop_irq:
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+err_destroy_ports:
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = NULL;
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(priv->driver);
+ return ret;
+}
+
+static int mips_ejtag_fdc_tty_remove(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ struct mips_ejtag_fdc_tty_port *dport;
+ int nport;
+ unsigned int cfg;
+
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = NULL;
+ tty_unregister_driver(priv->driver);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(priv->driver);
+ return 0;
+}
+
+static int mips_ejtag_fdc_tty_cpu_down(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ unsigned int cfg;
+
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+
+ return 0;
+}
+
+static int mips_ejtag_fdc_tty_cpu_up(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ unsigned int cfg;
+ int ret = 0;
+
+ if (priv->irq >= 0) {
+ /*
+ * IRQ is usable, enable RX interrupt.
+ * This must be done before the kthread is restarted, as the
+ * kthread may enable the TX interrupt.
+ */
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ /* Restart poll timer */
+ priv->removing = false;
+ add_timer_on(&priv->poll_timer, dev->cpu);
+ }
+
+ /* Restart the kthread */
+ priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't re-create kthread (%d)\n", ret);
+ goto out;
+ }
+ /* Bind it back to the right CPU and set it off */
+ kthread_bind(priv->thread, dev->cpu);
+ wake_up_process(priv->thread);
+out:
+ return ret;
+}
+
+static struct mips_cdmm_device_id mips_ejtag_fdc_tty_ids[] = {
+ { .type = 0xfd },
+ { }
+};
+
+static struct mips_cdmm_driver mips_ejtag_fdc_tty_driver = {
+ .drv = {
+ .name = "mips_ejtag_fdc",
+ },
+ .probe = mips_ejtag_fdc_tty_probe,
+ .remove = mips_ejtag_fdc_tty_remove,
+ .cpu_down = mips_ejtag_fdc_tty_cpu_down,
+ .cpu_up = mips_ejtag_fdc_tty_cpu_up,
+ .id_table = mips_ejtag_fdc_tty_ids,
+};
+module_mips_cdmm_driver(mips_ejtag_fdc_tty_driver);
+
+static int __init mips_ejtag_fdc_init_console(void)
+{
+ return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_con);
+}
+console_initcall(mips_ejtag_fdc_init_console);
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
+static struct mips_ejtag_fdc_console mips_ejtag_fdc_earlycon = {
+ .cons = {
+ .name = "early_fdc",
+ .write = mips_ejtag_fdc_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = CONSOLE_CHANNEL,
+ },
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_earlycon.lock),
+};
+
+int __init setup_early_fdc_console(void)
+{
+ return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_earlycon);
+}
+#endif
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
+
+/* read buffer to allow decompaction */
+static unsigned int kgdbfdc_rbuflen;
+static unsigned int kgdbfdc_rpos;
+static char kgdbfdc_rbuf[4];
+
+/* write buffer to allow compaction */
+static unsigned int kgdbfdc_wbuflen;
+static char kgdbfdc_wbuf[4];
+
+static void __iomem *kgdbfdc_setup(void)
+{
+ void __iomem *regs;
+ unsigned int cpu;
+
+ /* Find address, piggy-backing off the console per-CPU regs */
+ cpu = smp_processor_id();
+ regs = mips_ejtag_fdc_con.regs[cpu];
+ /* First console output on this CPU? */
+ if (!regs) {
+ regs = mips_cdmm_early_probe(0xfd);
+ mips_ejtag_fdc_con.regs[cpu] = regs;
+ }
+ /* Already tried and failed to find FDC on this CPU? */
+ if (IS_ERR(regs))
+ return regs;
+
+ return regs;
+}
+
+/* read a character from the read buffer, filling from FDC RX FIFO */
+static int kgdbfdc_read_char(void)
+{
+ unsigned int stat, channel, data;
+ void __iomem *regs;
+
+ /* No more data, try and read another FDC word from RX FIFO */
+ if (kgdbfdc_rpos >= kgdbfdc_rbuflen) {
+ kgdbfdc_rpos = 0;
+ kgdbfdc_rbuflen = 0;
+
+ regs = kgdbfdc_setup();
+ if (IS_ERR(regs))
+ return NO_POLL_CHAR;
+
+ /* Read next word from KGDB channel */
+ do {
+ stat = ioread32(regs + REG_FDSTAT);
+
+ /* No data waiting? */
+ if (stat & REG_FDSTAT_RXE)
+ return NO_POLL_CHAR;
+
+ /* Read next word */
+ channel = (stat & REG_FDSTAT_RXCHAN) >>
+ REG_FDSTAT_RXCHAN_SHIFT;
+ data = ioread32(regs + REG_FDRX);
+ } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
+
+ /* Decode into rbuf */
+ kgdbfdc_rbuflen = mips_ejtag_fdc_decode(data, kgdbfdc_rbuf);
+ }
+ pr_devel("kgdbfdc r %c\n", kgdbfdc_rbuf[kgdbfdc_rpos]);
+ return kgdbfdc_rbuf[kgdbfdc_rpos++];
+}
+
+/* push an FDC word from write buffer to TX FIFO */
+static void kgdbfdc_push_one(void)
+{
+ const char *bufs[1] = { kgdbfdc_wbuf };
+ struct fdc_word word;
+ void __iomem *regs;
+ unsigned int i;
+
+ /* Construct a word from any data in buffer */
+ word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
+ /* Relocate any remaining data to the beginning of the buffer */
+ kgdbfdc_wbuflen -= word.bytes;
+ for (i = 0; i < kgdbfdc_wbuflen; ++i)
+ kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
+
+ regs = kgdbfdc_setup();
+ if (IS_ERR(regs))
+ return;
+
+ /* Busy wait until there's space in fifo */
+ while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+ ;
+ iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
+}
+
+/* flush the whole write buffer to the TX FIFO */
+static void kgdbfdc_flush(void)
+{
+ while (kgdbfdc_wbuflen)
+ kgdbfdc_push_one();
+}
+
+/* write a character into the write buffer, writing out if full */
+static void kgdbfdc_write_char(u8 chr)
+{
+ pr_devel("kgdbfdc w %c\n", chr);
+ kgdbfdc_wbuf[kgdbfdc_wbuflen++] = chr;
+ if (kgdbfdc_wbuflen >= sizeof(kgdbfdc_wbuf))
+ kgdbfdc_push_one();
+}
+
+static struct kgdb_io kgdbfdc_io_ops = {
+ .name = "kgdbfdc",
+ .read_char = kgdbfdc_read_char,
+ .write_char = kgdbfdc_write_char,
+ .flush = kgdbfdc_flush,
+};
+
+static int __init kgdbfdc_init(void)
+{
+ kgdb_register_io_module(&kgdbfdc_io_ops);
+ return 0;
+}
+early_initcall(kgdbfdc_init);
+#endif