Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/resource.c | 4
-rw-r--r--  drivers/acpi/video.c | 20
-rw-r--r--  drivers/base/power/domain.c | 24
-rw-r--r--  drivers/base/power/wakeup.c | 1
-rw-r--r--  drivers/block/nvme-core.c | 99
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/clk/at91/pmc.c | 20
-rw-r--r--  drivers/clk/at91/pmc.h | 1
-rw-r--r--  drivers/clocksource/Kconfig | 16
-rw-r--r--  drivers/clocksource/mtk_timer.c | 9
-rw-r--r--  drivers/clocksource/pxa_timer.c | 2
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle.c | 61
-rw-r--r--  drivers/dma-buf/fence.c | 3
-rw-r--r--  drivers/dma-buf/reservation.c | 5
-rw-r--r--  drivers/firmware/dmi_scan.c | 17
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 8
-rw-r--r--  drivers/gpio/gpio-tps65912.c | 14
-rw-r--r--  drivers/gpio/gpiolib-of.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 152
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 8
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 36
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 28
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 15
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 99
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 8
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 79
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-di.c | 2
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-microsoft.c | 2
-rw-r--r--  drivers/hid/hid-saitek.c | 2
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 8
-rw-r--r--  drivers/hid/hid-sony.c | 6
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 7
-rw-r--r--  drivers/hid/wacom_wac.c | 11
-rw-r--r--  drivers/hwmon/ads7828.c | 3
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 2
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid1.c | 5
-rw-r--r--  drivers/md/raid5.c | 13
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/appletalk/Kconfig | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 2
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 47
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 175
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 54
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 246
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 24
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 9
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 18
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 9
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/macvtap.c | 7
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c | 82
-rw-r--r--  drivers/net/phy/phy.c | 23
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/usb/Kconfig | 1
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 5
-rw-r--r--  drivers/net/wan/cosa.c | 12
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r--  drivers/net/xen-netback/netback.c | 29
-rw-r--r--  drivers/pci/host/pci-versatile.c | 2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 62
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 73
-rw-r--r--  drivers/rtc/rtc-ds1685.c | 18
-rw-r--r--  drivers/sh/pm_runtime.c | 2
-rw-r--r--  drivers/thermal/int340x_thermal/int3400_thermal.c | 10
-rw-r--r--  drivers/thermal/int340x_thermal/int340x_thermal_zone.c | 10
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 1
-rw-r--r--  drivers/thermal/rcar_thermal.c | 26
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c | 41
-rw-r--r--  drivers/thermal/thermal_core.c | 37
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-bandgap.c | 2
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 2
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 49
-rw-r--r--  drivers/vhost/net.c | 25
-rw-r--r--  drivers/watchdog/at91sam9_wdt.c | 3
-rw-r--r--  drivers/xen/Makefile | 2
-rw-r--r--  drivers/xen/preempt.c | 44
-rw-r--r--  drivers/xen/privcmd.c | 2
-rw-r--r--  drivers/xen/xen-scsiback.c | 14
165 files changed, 2097 insertions, 1278 deletions
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c723668e3e27..5589a6e2a023 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
* CHECKME: len might be required to check versus a minimum
* length as well. 1 for io is fine, but for memory it does
* not make any sense at all.
+ * Note: some BIOSes report incorrect length for ACPI address space
+ * descriptor, so remove check of 'reslen == len' to avoid regression.
*/
- if (len && reslen && reslen == len && start <= end)
+ if (len && reslen && start <= end)
return true;
pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index debd30917010..26eb70c8f518 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
int acpi_video_register(void)
{
- int result = 0;
+ int ret;
+
if (register_count) {
/*
* if the function of acpi_video_register is already called,
@@ -2122,9 +2123,9 @@ int acpi_video_register(void)
mutex_init(&video_list_lock);
INIT_LIST_HEAD(&video_bus_head);
- result = acpi_bus_register_driver(&acpi_video_bus);
- if (result < 0)
- return -ENODEV;
+ ret = acpi_bus_register_driver(&acpi_video_bus);
+ if (ret)
+ return ret;
/*
* When the acpi_video_bus is loaded successfully, increase
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
static int __init acpi_video_init(void)
{
+ /*
+ * Let the module load even if ACPI is disabled (e.g. due to
+ * a broken BIOS) so that i915.ko can still be loaded on such
+ * old systems without an AcpiOpRegion.
+ *
+ * acpi_video_register() will report -ENODEV later as well due
+ * to acpi_disabled when i915.ko tries to register itself afterwards.
+ */
+ if (acpi_disabled)
+ return 0;
+
dmi_check_system(video_dmi_table);
if (intel_opregion_present())
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba4abbe4693c..45937f88e77c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
}
static int pm_genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *gpd)
+ struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
struct gpd_link *link;
int ret;
- ret = mutex_lock_interruptible(&gpd->lock);
+ ret = mutex_lock_interruptible(&genpd->lock);
if (ret)
return -ERESTARTSYS;
- if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+ if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
/*
* Modifications on the list require holding locks on both
* master and slave, so we are safe.
- * Also gpd->name is immutable.
+ * Also genpd->name is immutable.
*/
- list_for_each_entry(link, &gpd->master_links, master_node) {
+ list_for_each_entry(link, &genpd->master_links, master_node) {
seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &gpd->master_links))
+ if (!list_is_last(&link->master_node, &genpd->master_links))
seq_puts(s, ", ");
}
- list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
if (kobj_path == NULL)
continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
seq_puts(s, "\n");
exit:
- mutex_unlock(&gpd->lock);
+ mutex_unlock(&genpd->lock);
return 0;
}
static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
- struct generic_pm_domain *gpd;
+ struct generic_pm_domain *genpd;
int ret = 0;
seq_puts(s, " domain status slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- ret = pm_genpd_summary_one(s, gpd);
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ ret = pm_genpd_summary_one(s, genpd);
if (ret)
break;
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c2744b30d5d9..aab7158d2afe 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
pm_abort_suspend = true;
freeze_wake();
}
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_wakeup_clear(void)
{
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b64bccbb78c9..ceb32dd52a6c 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -482,6 +482,7 @@ static int nvme_error_status(u16 status)
}
}
+#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
if (be32_to_cpu(pi->ref_tag) == v)
@@ -538,6 +539,58 @@ static void nvme_dif_remap(struct request *req,
kunmap_atomic(pmap);
}
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+ return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+ return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+ .name = "NVME_META_NOOP",
+ .generate_fn = nvme_noop_generate,
+ .verify_fn = nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+ struct blk_integrity integrity;
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE3:
+ integrity = t10_pi_type3_crc;
+ break;
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ integrity = t10_pi_type1_crc;
+ break;
+ default:
+ integrity = nvme_meta_noop;
+ break;
+ }
+ integrity.tuple_size = ns->ms;
+ blk_integrity_register(ns->disk, &integrity);
+ blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static void nvme_dif_remap(struct request *req,
+ void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+}
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif
+
static void req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
@@ -1959,43 +2012,6 @@ static void nvme_config_discard(struct nvme_ns *ns)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
-static int nvme_noop_verify(struct blk_integrity_iter *iter)
-{
- return 0;
-}
-
-static int nvme_noop_generate(struct blk_integrity_iter *iter)
-{
- return 0;
-}
-
-struct blk_integrity nvme_meta_noop = {
- .name = "NVME_META_NOOP",
- .generate_fn = nvme_noop_generate,
- .verify_fn = nvme_noop_verify,
-};
-
-static void nvme_init_integrity(struct nvme_ns *ns)
-{
- struct blk_integrity integrity;
-
- switch (ns->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
- integrity = t10_pi_type3_crc;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- integrity = t10_pi_type1_crc;
- break;
- default:
- integrity = nvme_meta_noop;
- break;
- }
- integrity.tuple_size = ns->ms;
- blk_integrity_register(ns->disk, &integrity);
- blk_queue_max_integrity_segments(ns->queue, 1);
-}
-
static int nvme_revalidate_disk(struct gendisk *disk)
{
struct nvme_ns *ns = disk->private_data;
@@ -2036,7 +2052,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
id->dps & NVME_NS_DPS_PI_MASK : 0;
- if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms ||
+ if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
+ ns->ms != old_ms ||
bs != queue_logical_block_size(disk->queue) ||
(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
blk_integrity_unregister(disk);
@@ -2044,11 +2061,11 @@ static int nvme_revalidate_disk(struct gendisk *disk)
ns->pi_type = pi_type;
blk_queue_logical_block_size(ns->queue, bs);
- if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) &&
+ if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
!(id->flbas & NVME_NS_FLBAS_META_EXT))
nvme_init_integrity(ns);
- if (id->ncap == 0 || (ns->ms && !disk->integrity))
+ if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
set_capacity(disk, 0);
else
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2652,7 +2669,7 @@ static void nvme_dev_remove(struct nvme_dev *dev)
list_for_each_entry(ns, &dev->namespaces, list) {
if (ns->disk->flags & GENHD_FL_UP) {
- if (ns->disk->integrity)
+ if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
del_gendisk(ns->disk);
}
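
The nvme-core.c change above moves nvme_init_integrity() and the DIF helpers under CONFIG_BLK_DEV_INTEGRITY and supplies empty stubs for the disabled case, so call sites need no #ifdefs of their own. A minimal, self-contained sketch of that stub pattern (names and the HAVE_INTEGRITY macro are illustrative, not from the patch):

#include <stdio.h>

/* #define HAVE_INTEGRITY 1 */          /* flip on to build the real code path */

#ifdef HAVE_INTEGRITY
static void init_integrity(const char *disk)
{
        printf("registering integrity profile for %s\n", disk);
}
#else
/* Empty stub: callers compile and behave sanely when the feature is off. */
static void init_integrity(const char *disk) { }
#endif

int main(void)
{
        init_integrity("nvme0n1");      /* no #ifdef needed at the call site */
        return 0;
}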
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8e233edd7a09..871bd3550cb0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,7 +528,7 @@ out_cleanup:
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
- int old_max, cur_max;
+ unsigned long old_max, cur_max;
old_max = atomic_long_read(&zram->stats.max_used_pages);
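
The zram counter is an atomic_long_t, so reading it into an int can truncate or flip sign once the value no longer fits; the patch keeps the full width. A tiny hedged demo of the truncation (plain C, hypothetical value, 64-bit build assumed):

#include <stdio.h>

int main(void)
{
        long pages = 3000000000L;       /* hypothetical counter on a 64-bit box */
        int truncated = pages;          /* what the old 'int old_max' would hold */
        unsigned long kept = pages;     /* what the patched code holds */

        printf("truncated=%d kept=%lu\n", truncated, kept);
        return 0;
}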
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b87688881143..8bfc4c2bba87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
+ { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index f07c8152e5cc..3f27d21fb729 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
return 0;
}
+static void pmc_irq_suspend(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
+ pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
+}
+
+static void pmc_irq_resume(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IER, pmc->imr);
+}
+
static struct irq_chip pmc_irq = {
.name = "PMC",
.irq_disable = pmc_irq_mask,
.irq_mask = pmc_irq_mask,
.irq_unmask = pmc_irq_unmask,
.irq_set_type = pmc_irq_set_type,
+ .irq_suspend = pmc_irq_suspend,
+ .irq_resume = pmc_irq_resume,
};
static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
goto out_free_pmc;
pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
- if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+ if (request_irq(pmc->virq, pmc_irq_handler,
+ IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
goto out_remove_irqdomain;
return pmc;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 52d2041fa3f6..69abb08cf146 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -33,6 +33,7 @@ struct at91_pmc {
spinlock_t lock;
const struct at91_pmc_caps *caps;
struct irq_domain *irqdomain;
+ u32 imr;
};
static inline void pmc_lock(struct at91_pmc *pmc)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 1c2506f68122..68161f7a07d6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -63,6 +63,11 @@ config VT8500_TIMER
config CADENCE_TTC_TIMER
bool
+config ASM9260_TIMER
+ bool
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@ config CLKSRC_PXA
help
This enables OST0 support available on PXA and SA-11x0
platforms.
-
-config ASM9260_TIMER
- bool "Alphascale ASM9260 timer driver"
- depends on GENERIC_CLOCKEVENTS
- select CLKSRC_MMIO
- select CLKSRC_OF
- default y if MACH_ASM9260
- help
- This enables build of a clocksource and clockevent driver for
- the 32-bit System Timer hardware available on a Alphascale ASM9260.
-
endmenu
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 32a3d25795d3..68ab42356d0e 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
}
rate = clk_get_rate(clk);
+ mtk_timer_global_reset(evt);
+
if (request_irq(evt->dev.irq, mtk_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
- mtk_timer_global_reset(evt);
-
/* Configure clock source */
mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
/* Configure clock event */
mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
clockevents_config_and_register(&evt->dev, rate, 0x3,
0xffffffff);
+
+ mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
return;
err_clk_disable:
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 941f3f344e08..d9438af2bbd6 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
.dev_id = &ckevt_pxa_osmr0,
};
-static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 5e98c6b1f284..82d2fbb20f7e 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
- struct device_node *cpus, *np;
+ struct device_node *cpu0;
int ret = -EINVAL;
exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto err_cpufreq_reg;
- cpus = of_find_node_by_path("/cpus");
- if (!cpus) {
- pr_err("failed to find cpus node\n");
+ cpu0 = of_get_cpu_node(0, NULL);
+ if (!cpu0) {
+ pr_err("failed to find cpu0 node\n");
return 0;
}
- np = of_get_next_child(cpus, NULL);
- if (!np) {
- pr_err("failed to find cpus child node\n");
- of_node_put(cpus);
- return 0;
- }
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(np,
+ if (of_find_property(cpu0, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(cpu0,
cpu_present_mask);
if (IS_ERR(cdev))
pr_err("running cpufreq without cooling device: %ld\n",
PTR_ERR(cdev));
}
- of_node_put(np);
- of_node_put(cpus);
return 0;
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7794d3..7cb4b766cf94 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -22,6 +22,8 @@
#include <linux/smp.h>
#include <sysdev/fsl_soc.h>
+#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
+
/**
* struct cpu_data - per CPU data struct
* @parent: the parent node of cpu clock
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4d534582514e..080bd2dbde4b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
off = 1;
}
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
/**
* cpuidle_play_dead - cpu off-lining
*
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
return -ENODEV;
}
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, bool freeze)
{
unsigned int latency_req = 0;
int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ return find_deepest_state(drv, dev, false);
+}
+
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
/**
* cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
*
* If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick. Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
*/
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int index;
/*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
- index = cpuidle_find_deepest_state(drv, dev, true);
- if (index >= 0) {
- enter_freeze_proper(drv, dev, index);
- return;
- }
-
- /*
- * It is not safe to freeze the tick, find the deepest state available
- * at all and try to enter it normally.
- */
- index = cpuidle_find_deepest_state(drv, dev, false);
+ index = find_deepest_state(drv, dev, true);
if (index >= 0)
- cpuidle_enter(drv, dev, index);
- else
- arch_cpu_idle();
+ enter_freeze_proper(drv, dev, index);
- /* Interrupts are enabled again here. */
- local_irq_disable();
+ return index;
}
/**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
*/
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- if (off || !initialized)
- return -ENODEV;
-
- if (!drv || !dev || !dev->enabled)
- return -EBUSY;
-
return cpuidle_curr_governor->select(drv, dev);
}
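
With this rework, cpuidle_enter_freeze() no longer falls back to a normal idle state on its own; it just reports the index it entered (or a negative value), and the fallback decision moves to the caller, which can also use the new cpuidle_not_available() and cpuidle_find_deepest_state() helpers. A hedged fragment of how an idle-loop caller might combine them (the control flow around the calls is illustrative, based only on the signatures added above):

        int next_state, entered = -1;

        if (cpuidle_not_available(drv, dev)) {
                arch_cpu_idle();                /* no usable cpuidle: default idle */
                return;
        }

        if (idle_should_freeze()) {
                /* Prefer a state with an ->enter_freeze callback, tick frozen. */
                entered = cpuidle_enter_freeze(drv, dev);
                if (entered < 0)
                        /* None suitable: deepest normal state, entered normally. */
                        next_state = cpuidle_find_deepest_state(drv, dev);
        } else {
                next_state = cpuidle_select(drv, dev);
        }

        if (entered < 0)
                entered = cpuidle_enter(drv, dev, next_state);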
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index e5541117b3e9..50ef8bd8708b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
if (WARN_ON(timeout < 0))
return -EINVAL;
+ if (timeout == 0)
+ return fence_is_signaled(fence);
+
trace_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
trace_fence_wait_end(fence);
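
With the zero-timeout shortcut added above, fence_wait_timeout() doubles as a non-blocking poll. A short hedged usage sketch:

        signed long ret;

        /* timeout == 0: report the current state without ever sleeping */
        ret = fence_wait_timeout(fence, false, 0);
        if (ret > 0) {
                /* fence has already signaled */
        } else {
                /* still pending (ret == 0) */
        }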
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 3c97c8fa8d02..39920d77f288 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
unsigned seq, shared_count, i = 0;
long ret = timeout;
+ if (!timeout)
+ return reservation_object_test_signaled_rcu(obj, wait_all);
+
retry:
fence = NULL;
shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
int ret = 1;
if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- int ret;
-
fence = fence_get_rcu(lfence);
if (!fence)
return -1;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5f7b4e9eb6c..69fac068669f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place for example
*/
-static void dmi_table(u8 *buf, int len, int num,
+static void dmi_table(u8 *buf, u32 len, int num,
void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
@@ -93,12 +93,6 @@ static void dmi_table(u8 *buf, int len, int num,
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
- * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
- */
- if (dm->type == DMI_ENTRY_END_OF_TABLE)
- break;
-
- /*
* We want to know the total length (formatted area and
* strings) before decoding to make sure we won't run off the
* table in dmi_decode or dmi_string
@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
data++;
if (data - buf < len - 1)
decode(dm, private_data);
+
+ /*
+ * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+ */
+ if (dm->type == DMI_ENTRY_END_OF_TABLE)
+ break;
+
data += 2;
i++;
}
}
static phys_addr_t dmi_base;
-static u16 dmi_len;
+static u32 dmi_len;
static u16 dmi_num;
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 2fe195002021..f07d4a67fa76 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -179,12 +179,12 @@ again:
start = desc->phys_addr;
end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
- if ((start + size) > end || (start + size) > max)
- continue;
-
- if (end - size > max)
+ if (end > max)
end = max;
+ if ((start + size) > end)
+ continue;
+
if (round_down(end - size, align) < start)
continue;
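
The rewritten check clamps the region end to the allocation limit first and only then tests whether the allocation still fits, so a region straddling 'max' is neither rejected outright nor allowed to place the buffer above the limit. A small standalone sketch of the corrected test (the helper name, types and the worked numbers are illustrative, not from the patch):

#include <stdio.h>

typedef unsigned long long u64;

#define round_down(x, a)        ((x) & ~((u64)(a) - 1))  /* power-of-two align */

/* Highest aligned base in [start, end) that keeps [base, base + size) below
 * max, or 0 if the region cannot hold the allocation. */
static u64 highest_fit(u64 start, u64 end, u64 max, u64 size, u64 align)
{
        if (end > max)                  /* clamp the region to the limit */
                end = max;
        if (start + size > end)         /* too small once clamped */
                return 0;
        if (round_down(end - size, align) < start)
                return 0;               /* aligning would fall below the region */
        return round_down(end - size, align);
}

int main(void)
{
        /* Region 0x1000-0x5000, limit 0x3000: the candidate must end at or
         * below the limit, not at the region's own end. Prints 0x2000. */
        printf("%#llx\n", highest_fit(0x1000, 0x5000, 0x3000, 0x1000, 0x1000));
        return 0;
}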
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 472fb5b8779f..9cdbc0c9cb2d 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
struct gpio_chip gpio_chip;
};
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
int val;
val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
if (value)
tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
/* Set the initial value */
tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
GPIO_CFG_MASK);
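
The callbacks now recover their private tps65912_gpio_data from the embedded gpio_chip with container_of() instead of assuming the chip is embedded directly in struct tps65912. A minimal standalone illustration of that pattern (struct layout deliberately simplified and hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct gpio_chip { const char *label; };

struct tps65912_gpio_data {
        void *tps65912;                 /* driver-private handle */
        struct gpio_chip gpio_chip;     /* embedded; what callbacks receive */
};

#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)

int main(void)
{
        struct tps65912_gpio_data data = { .tps65912 = (void *)0x1234 };
        struct gpio_chip *gc = &data.gpio_chip; /* pointer the core hands back */

        printf("%p\n", to_tgd(gc)->tps65912);   /* recovers the wrapper struct */
        return 0;
}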
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8cad8e400b44..4650bf830d6b 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
if (ret < 0) {
- /* We've found the gpio chip, but the translation failed.
- * Return true to stop looking and return the translation
- * error via out_gpio
+ /* We've found a gpio chip, but the translation failed.
+ * Store translation error in out_gpio.
+ * Return false to keep looking, as more than one gpio chip
+ * could be registered per of-node.
*/
gg_data->out_gpio = ERR_PTR(ret);
- return true;
+ return false;
}
gg_data->out_gpio = gpiochip_get_desc(gc, ret);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0e39b9..910ff8ab9c9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
return KFD_MQD_TYPE_CP;
}
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
+ BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.first_compute_pipe;
}
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.compute_pipe_count;
+}
+
static inline unsigned int get_pipes_num_cpsch(void)
{
return PIPE_PER_ME_CP_SCHEDULING;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86cda34f..488f51d19427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
- BUG_ON(!dqm || !dqm->dev);
- return dqm->dev->shared_resources.compute_pipe_count;
-}
-
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b072466e2a6..5469efe0523e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
static int initialize_cpsch_cik(struct device_queue_manager *dqm)
{
- return init_pipelines(dqm, get_pipes_num(dqm), 0);
+ return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0409b907de5d..b3e3068c6ec0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
- cfg = ATMEL_HLCDC_CLKPOL;
+ cfg = 0;
prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = mode->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7320a6c6613f..c1cb17493e0d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- pm_runtime_put_sync(dev->dev);
-
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 063d2a7b941f..e79bd9ba474b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
/* Disable the layer */
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
- ATMEL_HLCDC_LAYER_RST);
+ ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+ ATMEL_HLCDC_LAYER_UPDATE);
/* Clear all pending interrupts */
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b00173d1be4..6b6b07ff720b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
@@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e2b66d..7fc6f8bd4821 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@
*/
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
- unsigned long start,
- unsigned long end,
+ u64 start,
+ u64 end,
enum drm_mm_search_flags flags);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
+ u64 size, unsigned alignment,
unsigned long color,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long hole_start = drm_mm_hole_node_start(hole_node);
- unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- unsigned long adj_start = hole_start;
- unsigned long adj_end = hole_end;
+ u64 hole_start = drm_mm_hole_node_start(hole_node);
+ u64 hole_end = drm_mm_hole_node_end(hole_node);
+ u64 adj_start = hole_start;
+ u64 adj_end = hole_end;
BUG_ON(node->allocated);
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
adj_start = adj_end - size;
if (alignment) {
- unsigned tmp = adj_start % alignment;
- if (tmp) {
+ u64 tmp = adj_start;
+ unsigned rem;
+
+ rem = do_div(tmp, alignment);
+ if (rem) {
if (flags & DRM_MM_CREATE_TOP)
- adj_start -= tmp;
+ adj_start -= rem;
else
- adj_start += alignment - tmp;
+ adj_start += alignment - rem;
}
}
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
- unsigned long end = node->start + node->size;
- unsigned long hole_start;
- unsigned long hole_end;
+ u64 end = node->start + node->size;
+ u64 hole_start;
+ u64 hole_end;
BUG_ON(node == NULL);
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
+ u64 size, unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
+ u64 size, unsigned alignment,
unsigned long color,
- unsigned long start, unsigned long end,
+ u64 start, u64 end,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long hole_start = drm_mm_hole_node_start(hole_node);
- unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- unsigned long adj_start = hole_start;
- unsigned long adj_end = hole_end;
+ u64 hole_start = drm_mm_hole_node_start(hole_node);
+ u64 hole_end = drm_mm_hole_node_end(hole_node);
+ u64 adj_start = hole_start;
+ u64 adj_end = hole_end;
BUG_ON(!hole_node->hole_follows || node->allocated);
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
- unsigned tmp = adj_start % alignment;
- if (tmp) {
+ u64 tmp = adj_start;
+ unsigned rem;
+
+ rem = do_div(tmp, alignment);
+ if (rem) {
if (flags & DRM_MM_CREATE_TOP)
- adj_start -= tmp;
+ adj_start -= rem;
else
- adj_start += alignment - tmp;
+ adj_start += alignment - rem;
}
}
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
+ u64 size, unsigned alignment,
unsigned long color,
- unsigned long start, unsigned long end,
+ u64 start, u64 end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
{
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);
-static int check_free_hole(unsigned long start, unsigned long end,
- unsigned long size, unsigned alignment)
+static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
if (end - start < size)
return 0;
if (alignment) {
- unsigned tmp = start % alignment;
+ u64 tmp = start;
+ unsigned rem;
+
+ rem = do_div(tmp, alignment);
if (tmp)
- start += alignment - tmp;
+ start += alignment - rem;
}
return end >= start + size;
}
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
- unsigned long adj_start;
- unsigned long adj_end;
- unsigned long best_size;
+ u64 adj_start;
+ u64 adj_end;
+ u64 best_size;
BUG_ON(mm->scanned_blocks);
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
- unsigned long hole_size = adj_end - adj_start;
+ u64 hole_size = adj_end - adj_start;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
}
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
- unsigned long start,
- unsigned long end,
+ u64 start,
+ u64 end,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
- unsigned long adj_start;
- unsigned long adj_end;
- unsigned long best_size;
+ u64 adj_start;
+ u64 adj_end;
+ u64 best_size;
BUG_ON(mm->scanned_blocks);
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
- unsigned long hole_size = adj_end - adj_start;
+ u64 hole_size = adj_end - adj_start;
if (adj_start < start)
adj_start = start;
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color)
{
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
- unsigned long start,
- unsigned long end)
+ u64 start,
+ u64 end)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- unsigned long hole_start, hole_end;
- unsigned long adj_start, adj_end;
+ u64 hole_start, hole_end;
+ u64 adj_start, adj_end;
mm->scanned_blocks++;
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
*
* Note that @mm must be cleared to 0 before calling this function.
*/
-void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
{
INIT_LIST_HEAD(&mm->hole_stack);
mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);
-static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
- const char *prefix)
+static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
+ const char *prefix)
{
- unsigned long hole_start, hole_end, hole_size;
+ u64 hole_start, hole_end, hole_size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
- prefix, hole_start, hole_end,
- hole_size);
+ pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
+ hole_end, hole_size);
return hole_size;
}
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- unsigned long total_used = 0, total_free = 0, total = 0;
+ u64 total_used = 0, total_free = 0, total = 0;
total_free += drm_mm_debug_hole(&mm->head_node, prefix);
drm_mm_for_each_node(entry, mm) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
- prefix, entry->start, entry->start + entry->size,
- entry->size);
+ pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
+ entry->start + entry->size, entry->size);
total_used += entry->size;
total_free += drm_mm_debug_hole(entry, prefix);
}
total = total_free + total_used;
- printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
- total_used, total_free);
+ pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
+ total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
-static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
- unsigned long hole_start, hole_end, hole_size;
+ u64 hole_start, hole_end, hole_size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
- hole_start, hole_end, hole_size);
+ seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
+ hole_end, hole_size);
return hole_size;
}
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- unsigned long total_used = 0, total_free = 0, total = 0;
+ u64 total_used = 0, total_free = 0, total = 0;
total_free += drm_mm_dump_hole(m, &mm->head_node);
drm_mm_for_each_node(entry, mm) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
- entry->start, entry->start + entry->size,
- entry->size);
+ seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
+ entry->start + entry->size, entry->size);
total_used += entry->size;
total_free += drm_mm_dump_hole(m, entry);
}
total = total_free + total_used;
- seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
+ seq_printf(m, "total: %llu, used %llu free %llu\n", total,
+ total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
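
The switch from unsigned long to u64 above is why the alignment remainder is no longer computed with the % operator: on 32-bit kernels a 64-bit division or modulo by a runtime value would pull in libgcc helpers, so do_div() is used instead. do_div(tmp, align) turns tmp into the quotient and returns the remainder, which is why the code copies the offset into a scratch variable first. A hedged sketch of the idiom (this helper is illustrative, not part of the patch):

#include <asm/div64.h>

/* Round a 64-bit offset up to 'align' without a 64-bit '%' operation. */
static u64 round_up_to(u64 value, unsigned int align)
{
        u64 tmp = value;
        unsigned int rem = do_div(tmp, align);  /* tmp becomes value / align */

        return rem ? value + align - rem : value;
}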
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811fe24ca..e8b18e542da4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+ seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
vma->node.start, vma->node.size,
vma->ggtt_view.type);
}
if (obj->stolen)
- seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+ seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_mappable)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8039cec71fc2..cc6ea53d2b81 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
return 0;
}
-static int i915_drm_suspend_late(struct drm_device *drm_dev)
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
}
pci_disable_device(drm_dev->pdev);
- pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+ /*
+ * During hibernation on some GEN4 platforms the BIOS may try to access
+ * the device even though it's already in D3 and hang the machine. So
+ * leave the device in D0 on those platforms and hope the BIOS will
+ * power down the device properly. Platforms where this was seen:
+ * Lenovo Thinkpad X301, X61s
+ */
+ if (!(hibernation &&
+ drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+ INTEL_INFO(dev_priv)->gen == 4))
+ pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0;
}
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
if (error)
return error;
- return i915_drm_suspend_late(dev);
+ return i915_drm_suspend_late(dev, false);
}
static int i915_drm_resume(struct drm_device *dev)
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(drm_dev);
+ return i915_drm_suspend_late(drm_dev, false);
+}
+
+static int i915_pm_poweroff_late(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_late(drm_dev, true);
}
static int i915_pm_resume_early(struct device *dev)
@@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.thaw_early = i915_pm_resume_early,
.thaw = i915_pm_resume,
.poweroff = i915_pm_suspend,
- .poweroff_late = i915_pm_suspend_late,
+ .poweroff_late = i915_pm_poweroff_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2a825e39646..8727086cf48c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* number comparisons on buffer last_read|write_seqno. It also allows an
* emission time to be associated with the request for tracking how far ahead
* of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
*/
struct drm_i915_gem_request {
struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
- /** Context related to this request */
+ /**
+ * Context related to this request
+ * Contexts are refcounted, so when this request is associated with a
+ * context, we must increment the context's refcount, to guarantee that
+ * it persists while any request is linked to it. Requests themselves
+ * are also refcounted, so the request will only be freed when the last
+ * reference to it is dismissed, and the code in
+ * i915_gem_request_free() will then decrement the refcount on the
+ * context.
+ */
struct intel_context *ctx;
/** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
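
The new comments document the kref-based lifetime rules for requests and the contexts they pin. For reference, the generic kref idiom they rely on looks like the following (a sketch, not i915 code; struct and function names are made up):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref ref;
        /* ... payload, e.g. a pointer to another refcounted object ... */
};

static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, ref);

        /* drop any references f holds on other objects, then free f */
        kfree(f);
}

static void example(struct foo *f)
{
        kref_init(&f->ref);             /* creation takes the initial reference */
        kref_get(&f->ref);              /* each additional user */
        kref_put(&f->ref, foo_release); /* user done */
        kref_put(&f->ref, foo_release); /* creator done: count hits zero,
                                         * foo_release() runs */
}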
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c26d36cc4b31..e5daad5f75fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(ring, submit_req->ctx);
- i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
+ i915_gem_request_unreference(submit_req);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77fb57a3..dccdc8aad2e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
- DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+ DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
- unsigned long *start,
- unsigned long *end)
+ u64 *start,
+ u64 *end)
{
if (node->color != color)
*start += 4096;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a2045848bd1a..9c6f93ec886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
- BUG_ON(stolen_offset & 4095);
- BUG_ON(size & 4095);
-
- if (WARN_ON(size == 0))
+ if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+ WARN_ON(stolen_offset & 4095))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1a51f6..6377b22269ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
+ mutex_lock(&dev->struct_mutex);
if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
- drm_gem_object_unreference_unlocked(&obj->base);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
- mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->bit_17 = NULL;
}
+err:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4145d95902f5..ede5bbbd8a08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
while (true) {
/* Find, clear, then process each source of interrupt */
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
if (IS_GEN9(dev))
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
iir = I915_READ(IIR);
for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
dev_priv->pm.irqs_enabled = false;
+ synchronize_irq(dev_priv->dev->irq);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3d220a67f865..e730789b53b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- u32 base = plane_config->base;
+ u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+ u32 size_aligned = round_up(plane_config->base + plane_config->size,
+ PAGE_SIZE);
+
+ size_aligned -= base_aligned;
if (plane_config->size == 0)
return false;
- obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
- plane_config->size);
+ obj = i915_gem_object_create_stolen_for_preallocated(dev,
+ base_aligned,
+ base_aligned,
+ size_aligned);
if (!obj)
return false;
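Editor's note: the hunk above switches intel_alloc_plane_obj() from the raw plane_config->base to page-aligned values before carving the object out of stolen memory. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and using local stand-ins for the kernel's round_down()/round_up() helpers (values are hypothetical):

#include <stdio.h>

#define PAGE_SZ 4096u
#define ROUND_DOWN(x, a) ((x) & ~((a) - 1u))               /* align down to a power-of-two boundary */
#define ROUND_UP(x, a)   (((x) + (a) - 1u) & ~((a) - 1u))  /* align up to a power-of-two boundary */

int main(void)
{
	/* Hypothetical firmware-provided framebuffer placement. */
	unsigned int base = 0x00012345;
	unsigned int size = 0x00003000;

	unsigned int base_aligned = ROUND_DOWN(base, PAGE_SZ);
	unsigned int size_aligned = ROUND_UP(base + size, PAGE_SZ) - base_aligned;

	/* The aligned region always covers the original [base, base + size). */
	printf("base_aligned=0x%x size_aligned=0x%x\n", base_aligned, size_aligned);
	return 0;
}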
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
+ case DRM_FORMAT_ARGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
+ case DRM_FORMAT_ABGR8888:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
case DRM_FORMAT_XRGB2101010:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
- plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+ plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
- plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+ plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
- plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+ plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@ retry:
old->release_fb->funcs->destroy(old->release_fb);
goto fail;
}
+ crtc->primary->crtc = crtc;
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -12182,9 +12198,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb == crtc->cursor->fb)
- return 0;
-
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@ static struct intel_quirk intel_quirks[] = {
/* HP Chromebook 14 (Celeron 2955U) */
{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+ /* Dell Chromebook 11 */
+ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248dd2259..54daa66c6970 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
return ret;
}
-static bool
-__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
/**
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
* @dev_priv: i915 device instance
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ /* We may be called too early in init, thanks BIOS! */
+ if (crtc == NULL)
+ return;
+
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
- !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+ to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0f358c5999ec..e8d3da9f3373 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
* If there isn't a request associated with this submission,
* create one as a temporary holder.
*/
- WARN(1, "execlist context submission without request");
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
request->ring = ring;
request->ctx = to;
+ kref_init(&request->ref);
+ request->uniq = dev_priv->request_uniq++;
+ i915_gem_context_reference(request->ctx);
} else {
+ i915_gem_request_reference(request);
WARN_ON(to != request->ctx);
}
request->tail = tail;
- i915_gem_request_reference(request);
- i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(ctx);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 121d30ca2d44..87fe8ed92ebe 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
118800000, { 0x091c, 0x091c, 0x06dc },
}, {
216000000, { 0x06dc, 0x0b5c, 0x091c },
- }
+ }, {
+ ~0UL, { 0x0000, 0x0000, 0x0000 },
+ },
};
static const struct dw_hdmi_sym_term imx_sym_term[] = {
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 266000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 270000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
- .mpll_cfg = imx_mpll_cfg,
- .cur_ctr = imx_cur_ctr,
- .sym_term = imx_sym_term,
- .dev_type = IMX6Q_HDMI,
+ .mpll_cfg = imx_mpll_cfg,
+ .cur_ctr = imx_cur_ctr,
+ .sym_term = imx_sym_term,
+ .dev_type = IMX6Q_HDMI,
+ .mode_valid = imx6q_hdmi_mode_valid,
};
static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
.cur_ctr = imx_cur_ctr,
.sym_term = imx_sym_term,
.dev_type = IMX6DL_HDMI,
+ .mode_valid = imx6dl_hdmi_mode_valid,
};
static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 1b86aac0b341..2d6dc94e1e64 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- struct drm_display_mode *mode = &encoder->crtc->hwmode;
u32 pixel_fmt;
- unsigned long serial_clk;
- unsigned long di_clk = mode->clock * 1000;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
-
- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
- /* dual channel LVDS mode */
- serial_clk = 3500UL * mode->clock;
- imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
- imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
- } else {
- serial_clk = 7000UL * mode->clock;
- imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
- di_clk);
- }
switch (imx_ldb_ch->chno) {
case 0:
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ unsigned long serial_clk;
+ unsigned long di_clk = mode->clock * 1000;
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
if (mode->clock > 170000) {
dev_warn(ldb->dev,
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
"%s: mode exceeds 85 MHz pixel clock\n", __func__);
}
+ if (dual) {
+ serial_clk = 3500UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
+ imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
+ } else {
+ serial_clk = 7000UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+ di_clk);
+ }
+
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
if (imx_ldb_ch == &ldb->channel[0]) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
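Editor's note: the imx-ldb hunks above move the LVDS serial clock setup from the prepare hook into mode_set. A hedged sketch of the arithmetic behind the 7000/3500 factors, assuming the usual LVDS serialization of 7 bits per pixel clock per channel, halved per channel in dual-channel mode, with mode->clock in kHz and the serial clock in Hz:

#include <stdio.h>

/* Sketch only: mirrors the factors used above, not the driver code itself. */
static unsigned long ldb_serial_clk_hz(unsigned long pixclk_khz, int dual)
{
	/* Single channel: 7 serialized bits per pixel clock -> 7 * pixclk.
	 * Dual channel: each channel carries half the pixels -> 3.5 * pixclk.
	 * The factor of 1000 folded into 7000/3500 converts kHz to Hz. */
	return dual ? 3500UL * pixclk_khz : 7000UL * pixclk_khz;
}

int main(void)
{
	unsigned long pixclk_khz = 74250;  /* e.g. a 720p60 mode clock */

	printf("single: %lu Hz, dual: %lu Hz\n",
	       ldb_serial_clk_hz(pixclk_khz, 0),
	       ldb_serial_clk_hz(pixclk_khz, 1));
	return 0;
}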
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 5e83e007080f..900dda6a8e71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
}
panel_node = of_parse_phandle(np, "fsl,panel", 0);
- if (panel_node)
+ if (panel_node) {
imxpd->panel = of_drm_find_panel(panel_node);
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
+ }
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 8edd531cb621..7369ee7f0c55 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
void mdp4_irq_preinstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
}
int mdp4_irq_postinstall(struct msm_kms *kms)
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
void mdp4_irq_uninstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
}
irqreturn_t mdp4_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 09b4a25eb553..c276624290af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
case 2: return (mdp5_cfg->lm.base[2]);
case 3: return (mdp5_cfg->lm.base[3]);
case 4: return (mdp5_cfg->lm.base[4]);
+ case 5: return (mdp5_cfg->lm.base[5]);
default: return INVALID_IDX(idx);
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 46fac545dc2b..2f2863cf8b45 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -62,8 +62,8 @@ struct mdp5_crtc {
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
- uint32_t width;
- uint32_t height;
+ uint32_t width, height;
+ uint32_t x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
struct drm_plane *plane;
uint32_t flush_mask = 0;
- /* we could have already released CTL in the disable path: */
- if (!mdp5_crtc->ctl)
+ /* this should not happen: */
+ if (WARN_ON(!mdp5_crtc->ctl))
return;
drm_atomic_crtc_for_each_plane(plane, crtc) {
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
drm_atomic_crtc_for_each_plane(plane, crtc) {
mdp5_plane_complete_flip(plane);
}
+
+ if (mdp5_crtc->ctl && !crtc->state->enable) {
+ mdp5_ctl_release(mdp5_crtc->ctl);
+ mdp5_crtc->ctl = NULL;
+ }
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
mdp5_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ /*
+ * If no CTL has been allocated in mdp5_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!mdp5_crtc->ctl))
+ return;
+
blend_setup(crtc);
crtc_flush_all(crtc);
request_pending(crtc, PENDING_FLIP);
-
- if (mdp5_crtc->ctl && !crtc->state->enable) {
- mdp5_ctl_release(mdp5_crtc->ctl);
- mdp5_crtc->ctl = NULL;
- }
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
return -EINVAL;
}
+static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ uint32_t xres = crtc->mode.hdisplay;
+ uint32_t yres = crtc->mode.vdisplay;
+
+ /*
+ * Cursor Region Of Interest (ROI) is a plane read from cursor
+ * buffer to render. The ROI region is determined by the visibility of
+ * the cursor point. In the default Cursor image the cursor point will
+ * be at the top left of the cursor image, unless it is specified
+ * otherwise using hotspot feature.
+ *
+ * If the cursor point reaches the right (xres - x < cursor.width) or
+ * bottom (yres - y < cursor.height) boundary of the screen, then ROI
+ * width and ROI height need to be evaluated to crop the cursor image
+ * accordingly.
+ * (xres-x) will be new cursor width when x > (xres - cursor.width)
+ * (yres-y) will be new cursor height when y > (yres - cursor.height)
+ */
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
+ mdp5_crtc->cursor.x);
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
+ mdp5_crtc->cursor.y);
+}
+
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ uint32_t roi_w, roi_h;
unsigned long flags;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
+ mdp5_crtc->cursor.scanout_bo = cursor_bo;
+ mdp5_crtc->cursor.width = width;
+ mdp5_crtc->cursor.height = height;
+
+ get_roi(crtc, &roi_w, &roi_h);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
- MDP5_LM_CURSOR_SIZE_ROI_H(height) |
- MDP5_LM_CURSOR_SIZE_ROI_W(width));
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
-
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
- blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
- mdp5_crtc->cursor.scanout_bo = cursor_bo;
- mdp5_crtc->cursor.width = width;
- mdp5_crtc->cursor.height = height;
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
- uint32_t xres = crtc->mode.hdisplay;
- uint32_t yres = crtc->mode.vdisplay;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
- x = (x > 0) ? x : 0;
- y = (y > 0) ? y : 0;
+ /* In case the CRTC is disabled, just drop the cursor update */
+ if (unlikely(!crtc->state->enable))
+ return 0;
- /*
- * Cursor Region Of Interest (ROI) is a plane read from cursor
- * buffer to render. The ROI region is determined by the visiblity of
- * the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
- *
- * If the cursor point reaches the right (xres - x < cursor.width) or
- * bottom (yres - y < cursor.height) boundary of the screen, then ROI
- * width and ROI height need to be evaluated to crop the cursor image
- * accordingly.
- * (xres-x) will be new cursor width when x > (xres - cursor.width)
- * (yres-y) will be new cursor height when y > (yres - cursor.height)
- */
- roi_w = min(mdp5_crtc->cursor.width, xres - x);
- roi_h = min(mdp5_crtc->cursor.height, yres - y);
+ mdp5_crtc->cursor.x = x = max(x, 0);
+ mdp5_crtc->cursor.y = y = max(y, 0);
+
+ get_roi(crtc, &roi_w, &roi_h);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_fixup = mdp5_crtc_mode_fixup,
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
- .prepare = mdp5_crtc_disable,
- .commit = mdp5_crtc_enable,
+ .disable = mdp5_crtc_disable,
+ .enable = mdp5_crtc_enable,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
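Editor's note: a self-contained sketch of the cursor ROI clamp that the new get_roi() helper above centralizes; the names here are illustrative stand-ins, not the driver's structures, and the cursor origin is assumed to lie on screen:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void get_roi_sketch(unsigned int xres, unsigned int yres,
			   unsigned int cur_w, unsigned int cur_h,
			   unsigned int x, unsigned int y,
			   unsigned int *roi_w, unsigned int *roi_h)
{
	/* Crop the cursor when it crosses the right/bottom screen edge. */
	*roi_w = min_u32(cur_w, xres - x);
	*roi_h = min_u32(cur_h, yres - y);
}

int main(void)
{
	unsigned int roi_w, roi_h;

	/* A 64x64 cursor at (1900, 1060) on a 1920x1080 mode is cropped to 20x20. */
	get_roi_sketch(1920, 1080, 64, 64, 1900, 1060, &roi_w, &roi_h);
	printf("roi=%ux%u\n", roi_w, roi_h);
	return 0;
}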
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index d6a14bb99988..af0e02fa4f48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_encoder->enabled = false;
+ mdp5_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_fixup = mdp5_encoder_mode_fixup,
.mode_set = mdp5_encoder_mode_set,
- .prepare = mdp5_encoder_disable,
- .commit = mdp5_encoder_enable,
+ .disable = mdp5_encoder_disable,
+ .enable = mdp5_encoder_enable,
};
/* initialize encoder */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 70ac81edd40f..a9407105b9b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_disable(mdp5_kms);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_disable(mdp5_kms);
}
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 871aa2108dc6..18fd643b6e69 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
* mark our set of crtc's as busy:
*/
ret = start_atomic(dev->dev_private, c->crtc_mask);
- if (ret)
+ if (ret) {
+ kfree(c);
return ret;
+ }
/*
* This is the point of no return - everything below never fails except
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4b1b49..6751553abe4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
nouveau_fbcon_zfill(dev, fbcon);
/* To allow resizeing without swapping buffers */
- NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+ NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
nouveau_fb->base.width, nouveau_fb->base.height,
nvbo->bo.offset, nvbo);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ed644a4f6f57..86807ee91bd1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+ if ((rdev->family >= CHIP_BONAIRE) &&
+ (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
+ viewport_h *= 2;
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5bf825dfaa09..8d74de82456e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
+ /* The atom implementation only supports writes with a max payload of
+ * 12 bytes since it uses 4 bits for the total count (header + payload)
+ * in the parameter space. The atom interface supports 16 byte
+ * payloads for reads. The hw itself supports up to 16 bytes of payload.
+ */
+ if (WARN_ON_ONCE(msg->size > 12))
+ return -E2BIG;
/* tx_size needs to be 4 even for bare address packets since the atom
* table needs the info in tx_buf[3].
*/
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7c9df1eac065..c39c1d0d9d4e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+ if (radeon_audio != 0 &&
+ drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+ ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
} else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
break;
case DRM_MODE_CONNECTOR_eDP:
- if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+ if (radeon_audio != 0 &&
+ drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+ ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
@@ -1622,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
bool travis_quirk = false;
- int encoder_mode;
if (connector) {
radeon_connector = to_radeon_connector(connector);
@@ -1718,11 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
}
break;
}
-
- encoder_mode = atombios_get_encoder_mode(encoder);
- if (radeon_audio != 0 &&
- (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
- radeon_audio_dpms(encoder, mode);
}
static void
@@ -1731,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int encoder_mode = atombios_get_encoder_mode(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
+
+ if (connector && (radeon_audio != 0) &&
+ ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+ (ENCODER_MODE_IS_DP(encoder_mode) &&
+ drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ radeon_audio_dpms(encoder, mode);
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -2136,6 +2143,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int encoder_mode;
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2163,10 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
- encoder_mode = atombios_get_encoder_mode(encoder);
- if (radeon_audio != 0 &&
- (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
- radeon_audio_mode_set(encoder, adjusted_mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2188,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ if (connector && (radeon_audio != 0) &&
+ ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+ (ENCODER_MODE_IS_DP(encoder_mode) &&
+ drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ radeon_audio_mode_set(encoder, adjusted_mode);
}
static bool
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6a4ba236c70..3e670d344a20 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 0x1);
+ WREG32(SRBM_INT_ACK, 0x1);
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
/* grbm */
WREG32(GRBM_INT_CNTL, 0);
+ /* SRBM */
+ WREG32(SRBM_INT_CNTL, 0);
/* vline/vblank, etc. */
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -7551,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ /* posting read */
+ RREG32(SRBM_STATUS);
+
return 0;
}
@@ -8046,6 +8053,10 @@ restart_ih:
break;
}
break;
+ case 96:
+ DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+ WREG32(SRBM_INT_ACK, 0x1);
+ break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 03003f8a6de6..c648e1996dab 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,10 @@
#define SOFT_RESET_ORB (1 << 23)
#define SOFT_RESET_VCE (1 << 24)
+#define SRBM_READ_ERROR 0xE98
+#define SRBM_INT_CNTL 0xEA0
+#define SRBM_INT_ACK 0xEA8
+
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 192c80389151..3adc2afe32aa 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -26,6 +26,9 @@
#include "radeon_audio.h"
#include "sid.h"
+#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
+#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
+
u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
- /* Two dtos; generally use dto0 for HDMI */
+ /* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
- if (crtc)
+ if (crtc)
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
- WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
- /* Two dtos; generally use dto1 for DP */
+ /* Two dtos; generally use dto1 for DP */
u32 value = 0;
value |= DCCG_AUDIO_DTO_SEL;
- if (crtc)
+ if (crtc)
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
- WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
- WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ if (ASIC_IS_DCE8(rdev)) {
+ WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
}
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
if (!dig || !dig->afmt)
return;
- offset = dig->afmt->offset;
-
if (enable) {
- if (dig->afmt->enabled)
- return;
-
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
- WREG32(EVERGREEN_DP_SEC_CNTL + offset,
- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- radeon_audio_enable(rdev, dig->afmt->pin, true);
+ WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+ EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+ WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+ EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
+ EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
+ EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
+ EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
} else {
- if (!dig->afmt->enabled)
- return;
-
- WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
- radeon_audio_enable(rdev, dig->afmt->pin, false);
+ WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
}
dig->afmt->enabled = enable;
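Editor's note: the DTO hunks above express 24 MHz over the target clock as the integer pair PHASE/MODULE, both effectively in kHz. A numeric sketch under that assumption (the example clock is arbitrary; the register semantics are taken from the comment in the hunk, not re-derived from hardware documentation):

#include <stdio.h>

int main(void)
{
	const unsigned int phase_khz = 24000;   /* fixed numerator: 24 MHz */
	const unsigned int module_khz = 148500; /* e.g. a 1080p60 pixel clock */

	/* The hardware derives the audio DTO rate from this ratio; the code
	 * above only programs the numerator and denominator registers. */
	printf("PHASE/MODULE = %u/%u = %.6f\n", phase_khz, module_khz,
	       (double)phase_khz / module_khz);
	return 0;
}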
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78600f534c80..973df064c14f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 0x1);
+ WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
+ WREG32(SRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
@@ -4590,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+ /* posting read */
+ RREG32(SRBM_STATUS);
+
return 0;
}
@@ -5066,6 +5072,10 @@ restart_ih:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
+ case 96:
+ DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+ WREG32(SRBM_INT_ACK, 0x1);
+ break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1d9aebc79595..c18d4ecbd95d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce4_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
u32 value;
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
- WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-
WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
- WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
WREG32(AFMT_60958_0 + offset,
AFMT_60958_CS_CHANNEL_NUMBER_L(1));
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!dig || !dig->afmt)
return;
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
+ if (enable) {
+ WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
- if (!enable && dig->afmt->pin) {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ } else {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
}
dig->afmt->enabled = enable;
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
if (!dig || !dig->afmt)
return;
- offset = dig->afmt->offset;
-
if (enable) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
uint32_t val;
- if (dig->afmt->enabled)
- return;
-
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+ WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+ EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
if (radeon_connector->con_priv) {
dig_connector = radeon_connector->con_priv;
- val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
+ val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
if (dig_connector->dp_clock == 162000)
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
else
val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
- WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
+ WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
}
- WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+ WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
} else {
- if (!dig->afmt->enabled)
- return;
-
- WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
}
dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index ee83d2a88750..a8d1d5240fcb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1191,6 +1191,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_READ_ERROR 0xE98
+#define SRBM_INT_CNTL 0xEA0
+#define SRBM_INT_ACK 0xEA8
+
/* display watermarks */
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define PRIORITY_A_CNT 0x6b18
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 24242a7f0ac3..dab00812abaa 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 0x1);
+ WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
- if ((disabled_rb_mask & 3) == 1) {
- /* RB0 disabled, RB1 enabled */
- tmp = 0x11111111;
- } else {
+ if ((disabled_rb_mask & 3) == 2) {
/* RB1 disabled, RB0 enabled */
tmp = 0x00000000;
+ } else {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index ad7125486894..6b44580440d0 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -82,6 +82,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_READ_ERROR 0xE98
+#define SRBM_INT_CNTL 0xEA0
+#define SRBM_INT_ACK 0xEA8
+
#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define DMA1_BUSY (1 << 6)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 279801ca5110..04f2514f7564 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
tmp |= RADEON_FP2_DETECT_MASK;
}
WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+ /* read back to post the write */
+ RREG32(RADEON_GEN_INT_CNTL);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 07a71a2488c9..2fcad344492f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(RV770_CG_THERMAL_INT, thermal_int);
}
+ /* posting read */
+ RREG32(R_000E50_SRBM_STATUS);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 843b65f46ece..fa2154493cf1 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- vrefresh = radeon_crtc->hw_mode.vrefresh;
+ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
break;
}
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 62c91ed669ce..dd6606b8e23c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!dig || !dig->afmt)
return;
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
- }
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index a3ceef6d9632..b21ef69a34ac 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
static const u32 pin_offsets[7] =
{
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
.set_avi_packet = evergreen_set_avi_packet,
.set_audio_packet = dce4_set_audio_packet,
.mode_set = radeon_audio_dp_mode_set,
- .dpms = evergreen_enable_dp_audio_packets,
+ .dpms = evergreen_dp_enable,
};
static struct radeon_audio_funcs dce6_hdmi_funcs = {
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
.set_avi_packet = evergreen_set_avi_packet,
.set_audio_packet = dce4_set_audio_packet,
.mode_set = radeon_audio_dp_mode_set,
- .dpms = dce6_enable_dp_audio_packets,
+ .dpms = dce6_dp_enable,
};
static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
}
void radeon_audio_detect(struct drm_connector *connector,
- enum drm_connector_status status)
+ enum drm_connector_status status)
{
struct radeon_device *rdev;
struct radeon_encoder *radeon_encoder;
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
else
radeon_encoder->audio = rdev->audio.hdmi_funcs;
- radeon_audio_write_speaker_allocation(connector->encoder);
- radeon_audio_write_sad_regs(connector->encoder);
- if (connector->encoder->crtc)
- radeon_audio_write_latency_fields(connector->encoder,
- &connector->encoder->crtc->mode);
+ dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
} else {
radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
}
}
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
* update the info frames with the data from the current display mode
*/
static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (!dig || !dig->afmt)
return;
- /* disable audio prior to setting up hw */
- dig->afmt->pin = radeon_audio_get_pin(encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ radeon_audio_set_mute(encoder, true);
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
radeon_audio_set_dto(encoder, mode->clock);
radeon_audio_set_vbi_packet(encoder);
radeon_hdmi_set_color_depth(encoder);
- radeon_audio_set_mute(encoder, false);
radeon_audio_update_acr(encoder, mode->clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;
- /* enable audio after to setting up hw */
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ radeon_audio_set_mute(encoder, false);
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
if (!dig || !dig->afmt)
return;
- /* disable audio prior to setting up hw */
- dig->afmt->pin = radeon_audio_get_pin(encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
-
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+ radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ else
+ radeon_audio_set_dto(encoder, dig_connector->dp_clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;
-
- /* enable audio after to setting up hw */
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c830863bc98a..4d0f96cc3da4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
+ INIT_LIST_HEAD(&p->validated);
+
if (!cs->num_chunks) {
return 0;
}
+
/* get chunks */
- INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
p->const_ib.sa_bo = NULL;
@@ -715,6 +717,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_device *rdev = p->rdev;
uint32_t header;
+ int ret = 0, i;
if (idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +746,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
+ ret = -EINVAL;
+ goto dump_ib;
}
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
+ ret = -EINVAL;
+ goto dump_ib;
}
return 0;
+
+dump_ib:
+ for (i = 0; i < ib_chunk->length_dw; i++) {
+ if (i == idx)
+ printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+ else
+ printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+ }
+ return ret;
}
/**
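Editor's note: the dump_ib label added above prints the whole command stream with a marker at the offending dword. A self-contained sketch of that output loop, with a plain array access standing in for radeon_get_ib_value():

#include <stdio.h>
#include <stdint.h>

static void dump_ib_sketch(const uint32_t *ib, unsigned int len,
			   unsigned int bad_idx)
{
	/* Print every dword; flag the one that failed to parse. */
	for (unsigned int i = 0; i < len; i++) {
		if (i == bad_idx)
			printf("\t0x%08x <---\n", ib[i]);
		else
			printf("\t0x%08x\n", ib[i]);
	}
}

int main(void)
{
	const uint32_t ib[] = { 0xc0012800, 0x80000000, 0xdeadbeef, 0x00000000 };

	dump_ib_sketch(ib, sizeof(ib) / sizeof(ib[0]), 2);
	return 0;
}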
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 6b670b0bc47b..3a297037cc17 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
(rdev->pdev->subsystem_vendor == 0x1734) &&
(rdev->pdev->subsystem_device == 0x1107))
use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
/* disable native backlight control on older asics */
else if (rdev->family < CHIP_R600)
use_bl = false;
+#endif
else
use_bl = true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 9f758d39420d..33cf4108386d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
single_display = false;
}
+ /* 120hz refresh rates tend to be problematic even if they are under the
+ * vblank limit.
+ */
+ if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+ single_display = false;
+
/* certain older asics have a separare 3D performance state,
* so try that first if the user selected performance
*/
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d81182ad53ec..97a904835759 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+
+ /* posting read */
+ RREG32(R_000040_GEN_INT_CNTL);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 73107fe9e46f..e088e5558da0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 1);
+ WREG32(SRBM_INT_ACK, 1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
- for (i = 0; i < ib->length_dw; i++) {
- if (i == idx)
- printk("\t0x%08x <---\n", ib->ptr[i]);
- else
- printk("\t0x%08x\n", ib->ptr[i]);
- }
ret = -EINVAL;
break;
case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = -EINVAL;
break;
}
- if (ret)
+ if (ret) {
+ for (i = 0; i < ib->length_dw; i++) {
+ if (i == idx)
+ printk("\t0x%08x <---\n", ib->ptr[i]);
+ else
+ printk("\t0x%08x\n", ib->ptr[i]);
+ }
break;
+ }
} while (idx < ib->length_dw);
return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
+ WREG32(SRBM_INT_CNTL, 0);
if (rdev->num_crtc >= 2) {
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6199,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(CG_THERMAL_INT, thermal_int);
+ /* posting read */
+ RREG32(SRBM_STATUS);
+
return 0;
}
@@ -6609,6 +6616,10 @@ restart_ih:
break;
}
break;
+ case 96:
+ DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+ WREG32(SRBM_INT_ACK, 0x1);
+ break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index cbd91d226f3c..99a9835c9f61 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -358,6 +358,10 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+#define SRBM_READ_ERROR 0xE98
+#define SRBM_INT_CNTL 0xEA0
+#define SRBM_INT_ACK 0xEA8
+
#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define DMA1_BUSY (1 << 6)
@@ -908,8 +912,8 @@
#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05b8
-#define DCCG_AUDIO_DTO1_MODULE 0x05bc
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aaa84ae2681..1a52522f5da7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
crtc->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state)
+ if (state) {
crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
}
static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
return NULL;
copy->base.mode_changed = false;
+ copy->base.active_changed = false;
copy->base.planes_changed = false;
copy->base.event = NULL;
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
/* program display mode */
tegra_dc_set_timings(dc, mode);
- if (dc->soc->supports_border_color)
- tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
/* interlacing isn't supported yet, so disable it */
if (dc->soc->supports_interlacing) {
value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void tegra_crtc_prepare(struct drm_crtc *crtc)
{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned int syncpt;
- unsigned long value;
-
drm_crtc_vblank_off(crtc);
-
- if (dc->pipe)
- syncpt = SYNCPT_VBLANK1;
- else
- syncpt = SYNCPT_VBLANK0;
-
- /* initialize display controller */
- tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
- /* initialize timer */
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
- WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
- WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
+ unsigned int syncpt;
+ u32 value;
int err;
if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /* initialize display controller */
+ if (dc->pipe)
+ syncpt = SYNCPT_VBLANK1;
+ else
+ syncpt = SYNCPT_VBLANK0;
+
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ if (dc->soc->supports_border_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
return 0;
cleanup:
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7e06657ae58b..7eaaee74a039 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
+ err = clk_set_rate(hdmi->clk, pclk);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+ err);
+ }
+
+ DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
/* power up sequence */
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
value &= ~SOR_PLL_PDBG;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0bef73b..8d9b7de25613 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
+ pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b61d6be97602..3ddfb3d0b64d 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,
clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+ if (div == 0)
+ div = 1;
rate = clkrate / div;
error = rate / (sig->mode.pixelclock / 1000);
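A minimal sketch of the divider guard added above, assuming only that the requested pixel clock can exceed the IPU clock: DIV_ROUND_CLOSEST() then rounds to 0 and the later clkrate / div would divide by zero, so the divider is clamped to 1.

#include <linux/kernel.h>

static unsigned long example_pick_div(unsigned long parent_rate,
				      unsigned long target_rate)
{
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, target_rate);

	return div ? div : 1;	/* never divide the parent rate by zero */
}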
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index db4fb6e1cc5b..7c669c328c4c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_SAITEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46edb4d3ed28..204312bfab2c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -654,6 +654,7 @@
#define USB_DEVICE_ID_MS_LK6K 0x00f9
#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
+#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
@@ -802,6 +803,7 @@
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index fbaea6eb882e..af935eb198c9 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
.driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
.driver_data = MS_ERGONOMY | MS_RDESC },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 5632c54eadf0..a014f21275d8 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id saitek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
.driver_data = SAITEK_FIX_PS1000 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6a58b6c723aa..e54ce1097e2c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+ unsigned long flags;
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
if (callback->usage_id == usage_id &&
(collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
callback->hsdev->end_collection_index)) {
*priv = callback->priv;
*hsdev = callback->hsdev;
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+ flags);
return callback->usage_callback;
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return NULL;
}
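The hunk above is an instance of a general locking rule; a minimal sketch with a hypothetical lock and shared variable: if the same lock may also be taken from interrupt context, the process-context side has to disable local interrupts while holding it, otherwise an IRQ arriving on the same CPU can try to re-acquire the lock and deadlock.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_shared;		/* hypothetical state also touched from IRQ context */

static void example_update(int val)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* disables local IRQs */
	example_shared = val;
	spin_unlock_irqrestore(&example_lock, flags);
}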
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31e9d2561106..1896c019e302 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
#define DS4_REPORT_0x81_SIZE 7
#define SIXAXIS_REPORT_0xF2_SIZE 18
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENOMEM;
}
+ spin_lock_init(&sc->lock);
+
sc->quirks = quirks;
hid_set_drvdata(hdev, sc);
sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
{
dbg_hid("Sony:%s\n", __func__);
- ida_destroy(&sony_device_id_allocator);
hid_unregister_driver(&sony_driver);
+ ida_destroy(&sony_device_id_allocator);
}
module_init(sony_init);
module_exit(sony_exit);
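Two initialization fixes sit in the hid-sony hunks above: the static list lock becomes DEFINE_SPINLOCK() instead of a bare, uninitialized spinlock_t, and the per-device lock gains spin_lock_init() before first use. A minimal sketch of the two forms, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_static_lock);	/* statically initialized */

struct example_dev {
	spinlock_t lock;			/* embedded in a dynamic object */
};

static struct example_dev *example_alloc(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		spin_lock_init(&dev->lock);	/* must run before first use */
	return dev;
}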
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d43e967e7533..36053f33d6d9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
int ret, ret_size;
- int size = ihid->bufsize;
+ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+ if (size > ihid->bufsize)
+ size = ihid->bufsize;
ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
client->name, ihid);
if (ret < 0) {
dev_warn(&client->dev,
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1a6507999a65..046351cf17f3 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+ if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else if (features->type == CINTIQ_HYBRID) {
/*
* Do not send hardware buttons under Android. They
@@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 =
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047,
- 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index bce4e9ff21bf..6c99ee7bafa3 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
&ads2830_regmap_config);
}
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
if (!diff_input)
data->cmd_byte |= ADS7828_CMD_SD_SE;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780bda09..ff48da61c94c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
enable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
/* Timeout 80ms */
- current->state = TASK_UNINTERRUPTIBLE;
+ set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((80 * HZ) / 1000);
printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
hc->irq, hc->irqcnt);
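The one-line hfcpci change above swaps a direct write of current->state for set_current_state(); a minimal sketch of the idiom, assuming nothing beyond the standard scheduler API: the helper includes the memory barrier needed so the new state is visible before schedule_timeout() runs.

#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_sleep_ms(unsigned int ms)
{
	set_current_state(TASK_UNINTERRUPTIBLE);	/* barrier included */
	schedule_timeout(msecs_to_jiffies(ms));
}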
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac4e28b..cadf9cc02b25 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
-__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
-__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+ resync_start_show, resync_start_store);
/*
* The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
-__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
}
static struct md_sysfs_entry md_metadata =
-__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry md_scan_mode =
-__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
-static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+static struct md_sysfs_entry md_sync_completed =
+ __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5d4011..d34e238afa54 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0) {
+ if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = first_bad - this_sector;
} else
best_good_sectors = sectors;
- best_disk = disk;
+ best_dist_disk = disk;
+ best_pending_disk = disk;
}
continue;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c0421a..cd2f96b2c572 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
schedule_timeout_uninterruptible(1);
}
/* Need to check if array will still be degraded after recovery/resync
- * We don't need to check the 'failed' flag as when that gets set,
- * recovery aborts.
+ * Note that with more than one drive failure it's possible we're rebuilding
+ * one drive while leaving another faulty drive in array.
*/
- for (i = 0; i < conf->raid_disks; i++)
- if (conf->disks[i].rdev == NULL)
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
+ }
+ rcu_read_unlock();
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
making it transparent to the connected L2 switch.
Ipvlan devices can be added using the "ip" command from the
- iproute2 package starting with the iproute2-X.Y.ZZ release:
+ iproute2 package starting with the iproute2-3.19 release:
"ip link add link <main-dev> [ NAME ] type ipvlan"
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
config LTPC
tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+ depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
help
This allows you to use the AppleTalk PC card to connect to LocalTalk
networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index ee9f650d5026..7b7053d3c5fa 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
u32 indir, dir; \
spin_lock(&priv->indir_lock); \
- indir = reg_readl(priv, REG_DIR_DATA_READ); \
dir = __raw_readl(priv->name + off); \
+ indir = reg_readl(priv, REG_DIR_DATA_READ); \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
link->open++;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ax_open(dev);
} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ei_open(dev);
} /* pcnet_open */
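Both watchdog conversions above (axnet_cs and pcnet_cs) follow the same pattern; a minimal sketch with a hypothetical callback: setup_timer() replaces the three open-coded field assignments, and mod_timer() both sets the expiry and arms the timer, so add_timer() plus a manual .expires is no longer needed.

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_watchdog_timer;	/* hypothetical timer */

static void example_watchdog(unsigned long data)
{
	/* poll link state here; re-arm with mod_timer() if still needed */
}

static void example_start_watchdog(unsigned long ctx)
{
	setup_timer(&example_watchdog_timer, example_watchdog, ctx);
	mod_timer(&example_watchdog_timer, jiffies + HZ);	/* fire in ~1s */
}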
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..6725dc00750b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+ (count < limit)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
int rxcomplete = 0;
- int txcomplete = 0;
unsigned long int flags;
- txcomplete = tse_tx_complete(priv);
+ tse_tx_complete(priv);
rxcomplete = tse_rx(priv, budget);
- if (rxcomplete >= budget || txcomplete > 0)
- return rxcomplete;
+ if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
- netdev_dbg(priv->dev,
- "NAPI Complete, did %d packets with budget %d\n",
- txcomplete+rxcomplete, budget);
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ rxcomplete, budget);
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
- priv->dmaops->enable_rxirq(priv);
- priv->dmaops->enable_txirq(priv);
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- return rxcomplete + txcomplete;
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ }
+ return rxcomplete;
}
/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct altera_tse_private *priv;
- unsigned long int flags;
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
}
priv = netdev_priv(dev);
- /* turn off desc irqs and enable napi rx */
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ spin_lock(&priv->rxdma_irq_lock);
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
if (likely(napi_schedule_prep(&priv->napi))) {
+ spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
- /* reset IRQs */
- priv->dmaops->clear_rxirq(priv);
- priv->dmaops->clear_txirq(priv);
-
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
return IRQ_HANDLED;
}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &priv->rx_fifo_depth)) {
+ &priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
goto err_free_netdev;
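The tse_poll()/altera_isr() rework above follows the standard NAPI contract; a minimal sketch with a hypothetical private struct and RX helper: complete the poll and re-enable device interrupts only when less than the budget was consumed, otherwise stay scheduled so NAPI polls again.

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* hypothetical device state */
};

static int example_rx(struct example_priv *priv, int budget)
{
	/* hypothetical RX processing; returns number of packets handled */
	return 0;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = example_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);
		/* re-enable the device's RX/TX interrupts here */
	}
	return done;
}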
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
}
}
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
return -EINVAL;
}
- phy_stop(pdata->phydev);
-
spin_lock_irqsave(&pdata->lock, flags);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 0);
- /* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
phy_start(pdata->phydev);
- /* Enable Tx/Rx */
+ xgbe_napi_enable(pdata, 0);
+
hw_if->powerup_tx(pdata);
hw_if->powerup_rx(pdata);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_attach(netdev);
- xgbe_napi_enable(pdata, 0);
netif_tx_start_all_queues(netdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct net_device *netdev = pdata->netdev;
+ int ret;
DBGPR("-->xgbe_start\n");
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
phy_start(pdata->phydev);
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
xgbe_init_tx_timers(pdata);
- xgbe_napi_enable(pdata, 1);
netif_tx_start_all_queues(netdev);
DBGPR("<--xgbe_start\n");
return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
- phy_stop(pdata->phydev);
-
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int i;
-
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->dev_irq);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- synchronize_irq(channel->dma_irq);
- }
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
xgbe_start(pdata);
DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel = NULL;
- unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- /* Request interrupts */
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
- goto err_rings;
- }
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev,
- "error requesting irq %d\n",
- channel->dma_irq);
- goto err_irq;
- }
- }
- }
-
ret = xgbe_start(pdata);
if (ret)
- goto err_start;
+ goto err_rings;
DBGPR("<--xgbe_open\n");
return 0;
-err_start:
- hw_if->exit(pdata);
-
-err_irq:
- if (pdata->per_channel_irq) {
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
err_rings:
desc_if->free_ring_resources(pdata);
@@ -1399,30 +1424,16 @@ err_phy_init:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- unsigned int i;
DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupts */
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
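The error path in xgbe_request_irqs() above relies on unsigned wrap-around to unwind; a self-contained sketch of the same loop shape: decrementing an unsigned index below zero wraps to UINT_MAX, so the i < count test ends the rollback.

static void example_rollback(int *acquired, unsigned int failed_idx,
			     unsigned int count)
{
	unsigned int i;

	/* Release everything acquired before the entry that failed; once i
	 * decrements past 0 it wraps to UINT_MAX and i < count goes false.
	 */
	for (i = failed_idx - 1; i < count; i--)
		acquired[i] = 0;
}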
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
- STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
s = &bcm_sysport_gstrings_stats[i];
switch (s->type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_SOFT:
continue;
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_SOFT,
};
/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
#define STAT_RXCHK(str, m, ofs) { \
.stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46bc389..6befde61c203 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
+ BCMGENET_STAT_SOFT,
};
struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
#define STAT_GENET_MISC(str, m, offset) { \
.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
- STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_SOFT:
continue;
case BCMGENET_STAT_MIB_RX:
case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
}
/* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int pkts_compl = 0;
unsigned int bds_compl;
unsigned int c_index;
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = ring->cbs + last_c_index;
bds_compl = 0;
if (tx_cb_ptr->skb) {
+ pkts_compl++;
bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
last_c_index &= (num_tx_bds - 1);
}
- if (ring->free_bds > (MAX_SKB_FRAGS + 1))
- ring->int_disable(priv, ring);
-
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+ }
ring->c_index = c_index;
+
+ return pkts_compl;
}
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ unsigned int released;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
- __bcmgenet_tx_reclaim(dev, ring);
+ released = __bcmgenet_tx_reclaim(dev, ring);
spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_tx_ring *ring =
+ container_of(napi, struct bcmgenet_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+ if (work_done == 0) {
+ napi_complete(napi);
+ ring->int_enable(ring->priv, ring);
+
+ return 0;
+ }
+
+ return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
- if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- ring->int_enable(priv, ring);
- }
out:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
struct device *kdev = &priv->pdev->dev;
int ret;
u32 reg, cpu_mask_clear;
+ int index;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ for (index = 0; index < priv->hw_params->tx_queues; index++)
+ bcmgenet_intrl2_1_writel(priv, (1 << index),
+ INTRL2_CPU_MASK_CLEAR);
+
/* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
unsigned int first_bd;
spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
ring->index = index;
if (index == DESC_INDEX) {
ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
+
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
return ret;
}
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
kfree(priv->tx_cbs);
}
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_fini_tx_ring(priv, i);
+
+ __bcmgenet_fini_dma(priv);
+}
+
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
GFP_KERNEL);
if (!priv->tx_cbs) {
- bcmgenet_fini_dma(priv);
+ __bcmgenet_fini_dma(priv);
return -ENOMEM;
}
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
struct bcmgenet_priv, napi);
unsigned int work_done;
- /* tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
work_done = bcmgenet_desc_rx(priv, budget);
/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
+ struct bcmgenet_tx_ring *ring;
unsigned int index;
/* Save irq status for bottom-half processing. */
priv->irq1_stat =
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
- ~priv->int1_mask;
+ ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
/* Check the MBDONE interrupts.
* packet is done, reclaim descriptors
*/
- if (priv->irq1_stat & 0x0000ffff) {
- index = 0;
- for (index = 0; index < 16; index++) {
- if (priv->irq1_stat & (1 << index))
- bcmgenet_tx_reclaim(priv->dev,
- &priv->tx_rings[index]);
+ for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ if (!(priv->irq1_stat & BIT(index)))
+ continue;
+
+ ring = &priv->tx_rings[index];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
}
}
+
return IRQ_HANDLED;
}
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
if (priv->irq0_stat &
(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
- /* Tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
+ }
}
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec0cc0a..0d370d168aee 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
+ struct napi_struct napi; /* NAPI per tx queue */
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
struct bcmgenet_tx_ring *);
void (*int_disable)(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *);
+ struct bcmgenet_priv *priv;
};
/* device context */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
}
static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
- int addr_len)
+ u8 v6)
{
- return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
- ipv6_clip_hash(ctbl, addr);
+ return v6 ? ipv6_clip_hash(ctbl, addr) :
+ ipv4_clip_hash(ctbl, addr);
}
static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
- int ret = 0;
+ int ret = -1;
if (!ctbl)
return 0;
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
-
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
spin_lock_init(&ce->lock);
atomic_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
- ce->addr_len = addr_len;
- memcpy(ce->addr, lip, addr_len);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
+ ce->addr6.sin6_family = AF_INET6;
+ memcpy(ce->addr6.sin6_addr.s6_addr,
+ lip, sizeof(struct in6_addr));
ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
if (ret) {
write_unlock_bh(&ctbl->lock);
return ret;
}
+ } else {
+ ce->addr.sin_family = AF_INET;
+ memcpy((char *)(&ce->addr.sin_addr), lip,
+ sizeof(struct in_addr));
}
} else {
write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
-
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
+ int ret = -1;
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
for (i = 0 ; i < ctbl->clipt_size; ++i) {
list_for_each_entry(ce, &ctbl->hash_list[i], list) {
ip[0] = '\0';
- if (ce->addr_len == 16)
- sprintf(ip, "%pI6c", ce->addr);
- else
- sprintf(ip, "%pI4c", ce->addr);
+ sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip,
atomic_read(&ce->refcnt));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */
atomic_t refcnt;
struct list_head list;
- u32 addr[4];
- int addr_len;
+ union {
+ struct sockaddr_in addr;
+ struct sockaddr_in6 addr6;
+ };
};
struct clip_tbl {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
- __be32 *buf, int dir);
+ void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
u32 len, __be32 *buf)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
- * @buf: host memory buffer
+ * @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- u32 len, __be32 *buf, int dir)
+ u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
/* Argument sanity checks ...
*/
- if (addr & 0x3)
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
+ buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
- *buf++ = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+ mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
- (__force u32) *buf++);
+ (__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
*/
if (resid) {
union {
- __be32 word;
+ u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
- last.word = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ last.word = le32_to_cpu(
+ (__force __le32)t4_read_reg(adap,
+ mem_base + offset));
for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
bp[i] = last.byte[i];
} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
- (__force u32) last.word);
+ (__force u32)cpu_to_le32(last.word));
}
}
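A minimal sketch of what the endianness note above boils down to, assuming only a readl()-style accessor on the memory window: the extra le32_to_cpu() undoes the byte swap the register read applies on big-endian hosts, so the data lands in host memory in adapter byte order on both little- and big-endian systems.

#include <linux/io.h>
#include <linux/kernel.h>

static void example_copy_word_from_window(u32 *dst, void __iomem *win)
{
	u32 reg = readl(win);	/* register value in CPU byte order */

	/* store the adapter's byte order in memory, whatever the host is */
	*dst = le32_to_cpu((__force __le32)reg);
}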
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038a388e..a5179bfcdc2c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, notify_intr)) {
- vnic_intr_return_all_credits(&enic->intr[notify_intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
}
if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
struct enic *enic = data;
unsigned int intr = enic_msix_notify_intr(enic);
- vnic_intr_return_all_credits(&enic->intr[intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[intr]);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..178e54028d10 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
struct phy_device *phydev = priv->phydev;
if (unlikely(phydev->link != priv->oldlink ||
- phydev->duplex != priv->oldduplex ||
- phydev->speed != priv->oldspeed))
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ pr_info("Reboot: freeing all eHEA resources\n");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret = NOTIFY_BAD;
+ struct memory_notify *arg = data;
+
+ mutex_lock(&dlpar_mem_lock);
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+ pr_info("memory offlining canceled");
+ /* Fall through: re-add canceled memory block */
+
+ case MEM_ONLINE:
+ pr_info("memory is going online");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ case MEM_GOING_OFFLINE:
+ pr_info("memory is going offline");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ default:
+ break;
+ }
+
+ ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
+
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+ return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+ int i;
+
+ if (ehea_fw_handles.arr)
+ for (i = 0; i < ehea_fw_handles.num_entries; i++)
+ ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+ ehea_fw_handles.arr[i].fwh,
+ FORCE_FREE);
+
+ if (ehea_bcmc_regs.arr)
+ for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+ ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+ ehea_bcmc_regs.arr[i].port_id,
+ ehea_bcmc_regs.arr[i].reg_type,
+ ehea_bcmc_regs.arr[i].macaddr,
+ 0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+ int ret = 0;
+
+ if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ return 0;
+
+ ret = ehea_create_busmap();
+ if (ret) {
+ pr_info("ehea_create_busmap failed\n");
+ goto out;
+ }
+
+ ret = register_reboot_notifier(&ehea_reboot_nb);
+ if (ret) {
+ pr_info("register_reboot_notifier failed\n");
+ goto out;
+ }
+
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret) {
+ pr_info("register_memory_notifier failed\n");
+ goto out2;
+ }
+
+ ret = crash_shutdown_register(ehea_crash_handler);
+ if (ret) {
+ pr_info("crash_shutdown_register failed\n");
+ goto out3;
+ }
+
+ return 0;
+
+out3:
+ unregister_memory_notifier(&ehea_mem_nb);
+out2:
+ unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+ return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+ if (atomic_read(&ehea_memory_hooks_registered))
+ return;
+
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ if (crash_shutdown_unregister(ehea_crash_handler))
+ pr_info("failed unregistering crash handler\n");
+ unregister_memory_notifier(&ehea_mem_nb);
+}
+
static int ehea_probe_adapter(struct platform_device *dev)
{
struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
int ret;
int i;
+ ret = ehea_register_memory_hooks();
+ if (ret)
+ return ret;
+
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
return 0;
}
-static void ehea_crash_handler(void)
-{
- int i;
-
- if (ehea_fw_handles.arr)
- for (i = 0; i < ehea_fw_handles.num_entries; i++)
- ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
- ehea_fw_handles.arr[i].fwh,
- FORCE_FREE);
-
- if (ehea_bcmc_regs.arr)
- for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
- ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
- ehea_bcmc_regs.arr[i].port_id,
- ehea_bcmc_regs.arr[i].reg_type,
- ehea_bcmc_regs.arr[i].macaddr,
- 0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- int ret = NOTIFY_BAD;
- struct memory_notify *arg = data;
-
- mutex_lock(&dlpar_mem_lock);
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- pr_info("memory offlining canceled");
- /* Readd canceled memory block */
- case MEM_ONLINE:
- pr_info("memory is going online");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- case MEM_GOING_OFFLINE:
- pr_info("memory is going offline");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- default:
- break;
- }
-
- ehea_update_firmware_handles();
- ret = NOTIFY_OK;
-
-out_unlock:
- mutex_unlock(&dlpar_mem_lock);
- return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
- .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
- unsigned long action, void *unused)
-{
- if (action == SYS_RESTART) {
- pr_info("Reboot: freeing all eHEA resources\n");
- ibmebus_unregister_driver(&ehea_driver);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
-};
-
static int check_module_parm(void)
{
int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
if (ret)
goto out;
- ret = ehea_create_busmap();
- if (ret)
- goto out;
-
- ret = register_reboot_notifier(&ehea_reboot_nb);
- if (ret)
- pr_info("failed registering reboot notifier\n");
-
- ret = register_memory_notifier(&ehea_mem_nb);
- if (ret)
- pr_info("failed registering memory remove notifier\n");
-
- ret = crash_shutdown_register(ehea_crash_handler);
- if (ret)
- pr_info("failed registering crash handler\n");
-
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
pr_err("failed registering eHEA device driver on ebus\n");
- goto out2;
+ goto out;
}
ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
if (ret) {
pr_err("failed to register capabilities attribute, ret=%d\n",
ret);
- goto out3;
+ goto out2;
}
return ret;
-out3:
- ibmebus_unregister_driver(&ehea_driver);
out2:
- unregister_memory_notifier(&ehea_mem_nb);
- unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(ehea_crash_handler);
+ ibmebus_unregister_driver(&ehea_driver);
out:
return ret;
}
static void __exit ehea_module_exit(void)
{
- int ret;
-
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
- unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(ehea_crash_handler);
- if (ret)
- pr_info("failed unregistering crash handler\n");
- unregister_memory_notifier(&ehea_mem_nb);
+ ehea_unregister_memory_hooks();
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
ehea_destroy_busmap();
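The ehea hunks above move the global reboot/memory/crash hooks from module init to the first adapter probe, guarded by an atomic use count. A minimal sketch of that register-once idiom, separate from the patch; the register_hooks()/unregister_hooks() helpers are hypothetical stand-ins for the notifier calls:

/* hypothetical helpers standing in for the notifier (un)registration */
static int register_hooks(void) { return 0; }
static void unregister_hooks(void) { }

static atomic_t hooks_registered = ATOMIC_INIT(0);

static int register_hooks_once(void)
{
	/* only the caller that takes the count from 0 to 1 registers */
	if (atomic_inc_return(&hooks_registered) > 1)
		return 0;

	return register_hooks();
}

static void unregister_hooks_once(void)
{
	/* nothing to undo if the hooks were never registered */
	if (atomic_read(&hooks_registered) == 0)
		return;

	unregister_hooks();
}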
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..072426a72745 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
return ret;
}
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ u64 mac_address;
+ int rc;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+ if (rc) {
+ netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+ return rc;
+ }
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_open = ibmveth_open,
.ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmveth_poll_controller,
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..6aea65dae5ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
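Both precedence fixes in this file (and the i40e_dcb_nl one below) come down to the same C rule: >> binds more tightly than &, so `reg & MASK >> SHIFT` masks with an already-shifted constant instead of shifting the masked field. A standalone illustration with made-up mask/shift values, not the real register layout:

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x000000F0u	/* example field in bits 7:4 */
#define FIELD_SHIFT 4

int main(void)
{
	uint32_t reg = 0xA5;	/* field value is 0xA */

	/* parsed as reg & (FIELD_MASK >> FIELD_SHIFT): yields 0x5 */
	uint32_t wrong = reg & FIELD_MASK >> FIELD_SHIFT;

	/* mask first, then shift the field down: yields 0xA */
	uint32_t right = (reg & FIELD_MASK) >> FIELD_SHIFT;

	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}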
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..a11c70ca5a28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..c17ee77100d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..dadda3c5d658 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ /* In the MFP case far fewer MSI-X vectors can be
+ * available, so we need to lower the number of
+ * queues used.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..5defe0d63514 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
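The reworked i40e_get_tx_pending() above measures outstanding descriptors from the write-back head and the tail pointer, wrapping around the ring size when the tail has lapped the head. The same arithmetic as a small standalone sketch with a worked example:

#include <stdio.h>
#include <stdint.h>

/* descriptors still outstanding on a ring of `count` entries, given the
 * completed head index and the producer tail index
 */
static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	/* no wrap: head=10, tail=14 on a 512-entry ring -> 4 pending */
	printf("%u\n", tx_pending(10, 14, 512));
	/* wrapped: head=510, tail=2 -> 2 + 512 - 510 = 4 pending */
	printf("%u\n", tx_pending(510, 2, 512));
	return 0;
}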
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..dff0baeb1ecc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66138e8..a61009f4b2df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
-
+ bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+ priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+
+ if (gro_enabled)
+ priv->dev->features |= NETIF_F_GRO;
+
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
- int port;
+ int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
} else {
struct mlx4_update_qp_params params = {.flags = 0};
- mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ if (err)
+ goto out;
}
}
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
- return 0;
+out:
+ return err;
}
static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
if (mac->phydev)
phy_start(mac->phydev);
- init_timer(&mac->tx->clean_timer);
- mac->tx->clean_timer.function = pasemi_mac_tx_timer;
- mac->tx->clean_timer.data = (unsigned long)mac->tx;
- mac->tx->clean_timer.expires = jiffies+HZ;
- add_timer(&mac->tx->clean_timer);
+ setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+ (unsigned long)mac->tx);
+ mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
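The pasemi change above (like the smc91c92 and stmmac hunks further down) replaces open-coded timer initialization with the setup_timer()/mod_timer() helpers; mod_timer() on a timer that is not yet pending is equivalent to setting ->expires and calling add_timer(). A hedged sketch of the idiom with a hypothetical my_timer_fn callback:

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	/* ... periodic work ... */
	mod_timer(&my_timer, jiffies + HZ);	/* re-arm in one second */
}

static void my_timer_start(unsigned long ctx)
{
	setup_timer(&my_timer, my_timer_fn, ctx);	/* bind callback + data */
	mod_timer(&my_timer, jiffies + HZ);		/* arm for the first time */
}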
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
} __attribute__ ((aligned(64)));
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
struct rcv_desc {
__le16 reference_handle;
__le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
#define NETXEN_IMAGE_START 0x43000 /* compressed image */
#define NETXEN_SECONDARY_START 0x200000 /* backup images */
#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
-#define NETXEN_USER_START 0x3E8000 /* Firmare info */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
#define QLCNIC_BRDCFG_START 0x4000 /* board config */
#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
-#define QLCNIC_USER_START 0x3E8000 /* Firmare info */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
int rc = -EINVAL;
if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firwmare\n");
+ netif_err(tp, ifup, dev, "invalid firmware\n");
goto out;
}
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
RTL_W8(ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
- netdev_reset_queue(tp->dev);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
- bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+ RTL_W8(TxPoll, NPQ);
- if (!skb->xmit_more || stop_queue ||
- netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
- RTL_W8(TxPoll, NPQ);
-
- mmiowb();
- }
+ mmiowb();
- if (stop_queue) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
unsigned int dirty_tx, tx_left;
- unsigned int bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- pkts_compl++;
- bytes_compl += tx_skb->skb->len;
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
-
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
.tpauser = 1,
.hw_swap = 1,
.rmiimode = 1,
- .shift_rd0 = 1,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
msleep(2); /* max frame time at 10 Mbps < 1250 us */
sh_eth_get_stats(ndev);
sh_eth_reset(ndev);
+
+ /* Set MAC address again */
+ update_mac_address(ndev);
}
/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
+ /* TACT bit must be checked before all the following reads */
+ rmb();
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ /* RACT bit must be checked before all the following reads */
+ rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740, R8A779x, and
- * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * bit 0. However, in case of the R8A7740 and R7S72100
+ * the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
+ wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE) {
+ if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
u32 count = (sh_eth_read(ndev, RDFAR) -
sh_eth_read(ndev, RDLAR)) >> 4;
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- if (skb_padto(skb, ETH_ZLEN))
+ if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
+ wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6aa67c..9fb6948e14c6 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1 << rocker_port->lport;
+ val |= 1ULL << rocker_port->lport;
else
- val &= ~(1 << rocker_port->lport);
+ val &= ~(1ULL << rocker_port->lport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
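The 1ULL change above matters because a bare 1 is a plain int: shifting it by a port number of 31 or more is undefined behaviour even though the result is assigned to a 64-bit value. A standalone illustration (the lport value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0;
	unsigned int lport = 40;	/* hypothetical port number */

	/* val |= 1 << lport; would shift a 32-bit int: undefined here */
	val |= 1ULL << lport;		/* shift performed in 64 bits */

	printf("0x%016llx\n", (unsigned long long)val);
	return 0;
}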
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
err = rocker_probe_port(rocker, i);
if (err)
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
smc->packets_waiting = 0;
smc_reset(dev);
- init_timer(&smc->media);
- smc->media.function = media_check;
- smc->media.data = (u_long) dev;
- smc->media.expires = jiffies + HZ;
- add_timer(&smc->media);
+ setup_timer(&smc->media, media_check, (u_long)dev);
+ mod_timer(&smc->media, jiffies + HZ);
return 0;
} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..209ee1b27f8d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
#include "smc91x.h"
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = smc_request_attrib(pdev, ndev);
if (ret)
goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
- neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+ if (machine_is_assabet() && machine_has_neponset())
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
- defined(CONFIG_MACH_MAINSTONE) ||\
- defined(CONFIG_MACH_ZYLONITE) ||\
- defined(CONFIG_MACH_LITTLETON) ||\
- defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2) ||\
- defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
#include <asm/mach-types.h>
@@ -74,95 +67,8 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
- unsigned int v = val << 16;
- v |= readl(ioaddr + (reg & ~2)) & 0xffff;
- writel(v, ioaddr + (reg & ~2));
- } else {
- writew(val, ioaddr + reg);
- }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
- defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_ARCH_PXA_IDP) || \
- defined(CONFIG_ARCH_RAMSES) || \
- defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
- if (reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
-
#elif defined(CONFIG_COLDFIRE)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
- priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
- add_timer(&priv->eee_ctrl_timer);
+ setup_timer(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer,
+ (unsigned long)priv);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(eee_timer));
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
*flow_type = IP_USER_FLOW;
break;
default:
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
TCAM_V4KEY0_CLASS_CODE_SHIFT;
ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
if (ret < 0) {
netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
parent->index);
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, slave->port_vlan);
+ priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
}
return 0;
}
+#endif
-static const struct dev_pm_ops cpsw_pm_ops = {
- .suspend = cpsw_suspend,
- .resume = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static const struct of_device_id cpsw_of_mtable[] = {
{ .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend_late = davinci_mdio_suspend,
- .resume_early = davinci_mdio_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
#if IS_ENABLED(CONFIG_OF)
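Both the cpsw and davinci_mdio hunks move to the sleep PM helper macros so the suspend/resume callbacks, and the ops table itself, compile away cleanly when CONFIG_PM_SLEEP is disabled instead of triggering defined-but-unused warnings. A minimal sketch of the pattern for a hypothetical foo platform driver:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}
#endif

/* expands to an empty ops table when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};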
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f7c2e2..9e16a2819d48 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
int i;
static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
- if (dev->flags & IFF_ALLMULTI) {
+ if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index e40fdfccc9c1..27ecc5c4fa26 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
} /* else everything is zero */
}
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
struct iov_iter *from, int noblock)
{
- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
struct sk_buff *skb;
struct macvlan_dev *vlan;
unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
}
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
linear, noblock, &err);
if (!skb)
goto err;
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
#define XGBE_PHY_SPEEDS 3
#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_10000_BLWC 0
#define SPEED_10000_CDR 0x7
#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x1e
+#define SPEED_10000_PQ 0x12
#define SPEED_10000_RATE 0x0
#define SPEED_10000_TXAMP 0xa
#define SPEED_10000_WORD 0x7
+#define SPEED_10000_DFE_TAP_CONFIG 0x1
+#define SPEED_10000_DFE_TAP_ENABLE 0x7f
#define SPEED_2500_BLWC 1
#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_2500_RATE 0x1
#define SPEED_2500_TXAMP 0xf
#define SPEED_2500_WORD 0x1
+#define SPEED_2500_DFE_TAP_CONFIG 0x3
+#define SPEED_2500_DFE_TAP_ENABLE 0x0
#define SPEED_1000_BLWC 1
#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_1000_RATE 0x3
#define SPEED_1000_TXAMP 0xf
#define SPEED_1000_WORD 0x1
+#define SPEED_1000_DFE_TAP_CONFIG 0x3
+#define SPEED_1000_DFE_TAP_ENABLE 0x0
/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
#define RXTX_REG20_BLWC_ENA_INDEX 2
#define RXTX_REG20_BLWC_ENA_WIDTH 1
#define RXTX_REG114_PQ_REG_INDEX 9
#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
SPEED_10000_TXAMP,
};
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+ SPEED_1000_DFE_TAP_CONFIG,
+ SPEED_2500_DFE_TAP_CONFIG,
+ SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+ SPEED_1000_DFE_TAP_ENABLE,
+ SPEED_2500_DFE_TAP_ENABLE,
+ SPEED_10000_DFE_TAP_ENABLE,
+};
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
/* Auto-negotiation state machine support */
struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
status = XSIR0_IOREAD(priv, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- return;
+ goto rx_reset;
}
netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
sizeof(priv->serdes_tx_amp));
}
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_CFG_PROPERTY,
+ priv->serdes_dfe_tap_cfg,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_CFG_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_cfg,
+ amd_xgbe_phy_serdes_dfe_tap_cfg,
+ sizeof(priv->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_ENA_PROPERTY,
+ priv->serdes_dfe_tap_ena,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_ENA_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_ena,
+ amd_xgbe_phy_serdes_dfe_tap_ena,
+ sizeof(priv->serdes_dfe_tap_ena));
+ }
+
phydev->priv = priv;
if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
}
/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ * speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+ unsigned int idx;
+
+ idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+ return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+ (settings[idx].setting & features);
+}
+
+/**
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
* @phydev: the target phy_device struct
*
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
int status;
- unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- idx = phy_find_setting(phydev->speed, phydev->duplex);
- if (!(lp & adv & settings[idx].setting))
+ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
goto eee_exit_err;
if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274e884a..f1ee71e22241 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
- struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
- return team_port_exists(dev) ? port : NULL;
+ return rcu_dereference(dev->rx_handler_data);
}
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3bd9678315ad..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
* Linksys USB200M
* Netgear FA120
* Sitecom LN-029
+ * Sitecom LN-028
* Intellinet USB 2.0 Ethernet
* ST Lab USB 2.0 Ethernet
* TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0df6, 0x0056),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+ USB_DEVICE (0x0df6, 0x061c),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9cdfb3fe9c15..778e91531fac 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
}
cprev = cnow;
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tiocmget->waitq, &wait);
return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
}, {
USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
.driver_info = (unsigned long) &prolific_info,
+}, {
+ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
+ * Host-to-Host Cable
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->rxwaitq, &wait);
while (!chan->rx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->rx_status == 0) {
chan->rx_status = 1;
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
spin_unlock_irqrestore(&cosa->lock, flags);
mutex_unlock(&chan->rlock);
return -ERESTARTSYS;
}
}
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
kbuf = chan->rxdata;
count = chan->rxsize;
spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->txwaitq, &wait);
while (!chan->tx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->tx_status == 0) {
chan->tx_status = 1;
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
}
}
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
up(&chan->wsem);
spin_unlock_irqrestore(&cosa->lock, flags);
kfree(kbuf);
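The hso and cosa hunks all make the same substitution: a raw store to current->state becomes set_current_state(), which carries the memory barrier the sleep/wakeup handshake relies on, or __set_current_state(), which is enough when the task is only putting itself back to TASK_RUNNING. A minimal sketch of the resulting idiom, with a hypothetical wait queue and completion flag:

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/errno.h>

/* hypothetical driver state */
static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_done;

static int demo_wait_for_done(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&demo_waitq, &wait);
	while (!demo_done) {
		/* barrier between setting the state and re-testing the
		 * condition; a bare "current->state = ..." store lacks it */
		set_current_state(TASK_INTERRUPTIBLE);
		if (demo_done)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	/* only this task resets its own state here, so no barrier is needed */
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&demo_waitq, &wait);
	return ret;
}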
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a4c6586a8d2..8908be6dbc48 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- genlmsg_unicast(&init_net, skb, dst_portid);
+ if (genlmsg_unicast(&init_net, skb, dst_portid))
+ goto err_free_txskb;
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
return;
nla_put_failure:
+ nlmsg_free(skb);
+err_free_txskb:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
ieee80211_free_txskb(hw, my_skb);
data->tx_failed++;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned long flags;
do {
+ int notify;
+
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+
if (cons == end)
break;
txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
+ int notify;
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
+
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
}
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
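Both netback call sites now share the same ordering: produce the response and push the ring pointer while holding response_lock, then raise the event channel only after the lock is dropped. A condensed sketch of that shape (using only the types and helpers visible in the hunks above; this is not a function from the patch):

static void demo_tx_error_response(struct xenvif_queue *queue,
				   struct xen_netif_tx_request *txp)
{
	unsigned long flags;
	int notify;

	spin_lock_irqsave(&queue->response_lock, flags);
	make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
	/* advance rsp_prod and learn whether the frontend needs a kick ... */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	spin_unlock_irqrestore(&queue->response_lock, flags);

	/* ... but deliver the kick outside the lock */
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}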
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 1ec694a52379..464bf492ee2a 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
- resource_list_for_each_entry(win, res, list) {
+ resource_list_for_each_entry(win, res) {
struct resource *parent, *res = win->res;
switch (resource_type(res)) {
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 70a5d94cc766..b4f7744f6751 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/suspend.h>
#include <linux/uaccess.h>
#include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs;
static int irq;
static DEFINE_SPINLOCK(at91_rtc_lock);
static u32 at91_rtc_shadow_imr;
+static bool suspended;
+static DEFINE_SPINLOCK(suspended_lock);
+static unsigned long cached_events;
+static u32 at91_rtc_imr;
static void at91_rtc_write_ier(u32 mask)
{
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
struct rtc_device *rtc = platform_get_drvdata(pdev);
unsigned int rtsr;
unsigned long events = 0;
+ int ret = IRQ_NONE;
+ spin_lock(&suspended_lock);
rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
- rtc_update_irq(rtc, 1, events);
+ if (!suspended) {
+ rtc_update_irq(rtc, 1, events);
- dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
- events >> 8, events & 0x000000FF);
+ dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
+ __func__, events >> 8, events & 0x000000FF);
+ } else {
+ cached_events |= events;
+ at91_rtc_write_idr(at91_rtc_imr);
+ pm_system_wakeup();
+ }
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
}
- return IRQ_NONE; /* not handled */
+ spin_unlock(&suspended_lock);
+
+ return ret;
}
static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
AT91_RTC_CALEV);
ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
- IRQF_SHARED,
- "at91_rtc", pdev);
+ IRQF_SHARED | IRQF_COND_SUSPEND,
+ "at91_rtc", pdev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
return ret;
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev)
/* AT91RM9200 RTC Power management control */
-static u32 at91_rtc_imr;
-
static int at91_rtc_suspend(struct device *dev)
{
/* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev)
at91_rtc_imr = at91_rtc_read_imr()
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
+ unsigned long flags;
+
enable_irq_wake(irq);
- else
+
+ spin_lock_irqsave(&suspended_lock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&suspended_lock, flags);
+ } else {
at91_rtc_write_idr(at91_rtc_imr);
+ }
}
return 0;
}
static int at91_rtc_resume(struct device *dev)
{
+ struct rtc_device *rtc = dev_get_drvdata(dev);
+
if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&suspended_lock, flags);
+
+ if (cached_events) {
+ rtc_update_irq(rtc, 1, cached_events);
+ cached_events = 0;
+ }
+
+ suspended = false;
+ spin_unlock_irqrestore(&suspended_lock, flags);
+
disable_irq_wake(irq);
- else
- at91_rtc_write_ier(at91_rtc_imr);
+ }
+ at91_rtc_write_ier(at91_rtc_imr);
}
return 0;
}
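The same wakeup contract recurs in the at91sam9 RTC, at91 watchdog, and atmel_serial hunks that follow: the shared IRQ is requested with IRQF_COND_SUSPEND, and while the device is marked suspended the handler masks its own sources, caches the event, and calls pm_system_wakeup() instead of poking the (possibly suspended) upper layers. A stripped-down sketch of that pattern, with entirely hypothetical names and helpers:

/* hypothetical device state guarded by dev->lock */
struct demo_dev {
	spinlock_t lock;
	bool suspended;
	unsigned long cached_events;
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;
	unsigned long events;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dev->lock);
	events = demo_read_and_clear_status(dev);	/* hypothetical helper */
	if (events) {
		if (dev->suspended) {
			dev->cached_events |= events;
			demo_mask_irqs(dev);		/* hypothetical helper */
			pm_system_wakeup();
		} else {
			demo_report_events(dev, events);/* hypothetical helper */
		}
		ret = IRQ_HANDLED;
	}
	spin_unlock(&dev->lock);

	return ret;
}

Resume then takes the same lock, clears the suspended flag, replays cached_events, and re-enables the sources that the handler masked.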
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2183fd2750ab..5ccaee32df72 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/suspend.h>
#include <linux/clk.h>
/*
@@ -77,6 +78,9 @@ struct sam9_rtc {
unsigned int gpbr_offset;
int irq;
struct clk *sclk;
+ bool suspended;
+ unsigned long events;
+ spinlock_t lock;
};
#define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
return 0;
}
-/*
- * IRQ handler for the RTC
- */
-static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
{
- struct sam9_rtc *rtc = _rtc;
u32 sr, mr;
- unsigned long events = 0;
/* Shared interrupt may be for another device. Note: reading
* SR clears it, so we must only read it in this irq handler!
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
/* alarm status */
if (sr & AT91_RTT_ALMS)
- events |= (RTC_AF | RTC_IRQF);
+ rtc->events |= (RTC_AF | RTC_IRQF);
/* timer update/increment */
if (sr & AT91_RTT_RTTINC)
- events |= (RTC_UF | RTC_IRQF);
+ rtc->events |= (RTC_UF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+}
+
+static void at91_rtc_flush_events(struct sam9_rtc *rtc)
+{
+ if (!rtc->events)
+ return;
- rtc_update_irq(rtc->rtcdev, 1, events);
+ rtc_update_irq(rtc->rtcdev, 1, rtc->events);
+ rtc->events = 0;
pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
- events >> 8, events & 0x000000FF);
+ rtc->events >> 8, rtc->events & 0x000000FF);
+}
- return IRQ_HANDLED;
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+ struct sam9_rtc *rtc = _rtc;
+ int ret;
+
+ spin_lock(&rtc->lock);
+
+ ret = at91_rtc_cache_events(rtc);
+
+ /* We're called in suspended state */
+ if (rtc->suspended) {
+ /* Mask irqs coming from this peripheral */
+ rtt_writel(rtc, MR,
+ rtt_readl(rtc, MR) &
+ ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+ /* Trigger a system wakeup */
+ pm_system_wakeup();
+ } else {
+ at91_rtc_flush_events(rtc);
+ }
+
+ spin_unlock(&rtc->lock);
+
+ return ret;
}
static const struct rtc_class_ops at91_rtc_ops = {
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev)
/* register irq handler after we know what name we'll use */
ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
- IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc);
+ IRQF_SHARED | IRQF_COND_SUSPEND,
+ dev_name(&rtc->rtcdev->dev), rtc);
if (ret) {
dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
return ret;
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev)
rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
if (rtc->imr) {
if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
+ unsigned long flags;
+
enable_irq_wake(rtc->irq);
+ spin_lock_irqsave(&rtc->lock, flags);
+ rtc->suspended = true;
+ spin_unlock_irqrestore(&rtc->lock, flags);
/* don't let RTTINC cause wakeups */
if (mr & AT91_RTT_RTTINCIEN)
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev)
u32 mr;
if (rtc->imr) {
+ unsigned long flags;
+
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq);
mr = rtt_readl(rtc, MR);
rtt_writel(rtc, MR, mr | rtc->imr);
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ rtc->suspended = false;
+ at91_rtc_cache_events(rtc);
+ at91_rtc_flush_events(rtc);
+ spin_unlock_irqrestore(&rtc->lock, flags);
}
return 0;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 8c3bfcb115b7..803869c7d7c2 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
* of this RTC chip. We check for it anyways in case support is
* added in the future.
*/
- if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+ if (unlikely(seconds >= 0xc0))
alrm->time.tm_sec = -1;
else
alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
RTC_SECS_BCD_MASK,
RTC_SECS_BIN_MASK);
- if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+ if (unlikely(minutes >= 0xc0))
alrm->time.tm_min = -1;
else
alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
RTC_MINS_BCD_MASK,
RTC_MINS_BIN_MASK);
- if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+ if (unlikely(hours >= 0xc0))
alrm->time.tm_hour = -1;
else
alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
* field, and we only support four fields. We put the support
* here anyways for the future.
*/
- if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+ if (unlikely(seconds >= 0xc0))
seconds = 0xff;
- if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+ if (unlikely(minutes >= 0xc0))
minutes = 0xff;
- if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+ if (unlikely(hours >= 0xc0))
hours = 0xff;
alrm->time.tm_mon = -1;
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* ----------------------------------------------------------------------- */
/* /dev/rtcX Interface functions */
-#ifdef CONFIG_RTC_INTF_DEV
/**
* ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
* @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-#endif
/* ----------------------------------------------------------------------- */
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
/* Make sure we actually matched something. */
- if (!bcd_reg_info && !bin_reg_info)
+ if (!bcd_reg_info || !bin_reg_info)
return -EINVAL;
/* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
return -EINVAL;
/* Make sure we actually matched something. */
- if (!bcd_reg_info && !bin_reg_info)
+ if (!bcd_reg_info || !bin_reg_info)
return -EINVAL;
/* Check for a valid range. */
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index f3ee439d6f0e..cd4c293f0dd0 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
if (!of_machine_is_compatible("renesas,emev2") &&
!of_machine_is_compatible("renesas,r7s72100") &&
!of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
!of_machine_is_compatible("renesas,r8a7740") &&
+#endif
!of_machine_is_compatible("renesas,r8a7778") &&
!of_machine_is_compatible("renesas,r8a7779") &&
!of_machine_is_compatible("renesas,r8a7790") &&
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 25d244cbbe8f..031018e7a65b 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -262,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
result = acpi_parse_art(priv->adev->handle, &priv->art_count,
&priv->arts, true);
if (result)
- goto free_priv;
-
+ dev_dbg(&pdev->dev, "_ART table parsing error\n");
result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
&priv->trts, true);
if (result)
- goto free_art;
+ dev_dbg(&pdev->dev, "_TRT table parsing error\n");
platform_set_drvdata(pdev, priv);
@@ -281,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
&int3400_thermal_params, 0, 0);
if (IS_ERR(priv->thermal)) {
result = PTR_ERR(priv->thermal);
- goto free_trt;
+ goto free_art_trt;
}
priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -295,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_zone:
thermal_zone_device_unregister(priv->thermal);
-free_trt:
+free_art_trt:
kfree(priv->trts);
-free_art:
kfree(priv->arts);
free_priv:
kfree(priv);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index f88b08877025..1e25133d35e2 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
trip_cnt, GFP_KERNEL);
if (!int34x_thermal_zone->aux_trips) {
ret = -ENOMEM;
- goto free_mem;
+ goto err_trip_alloc;
}
trip_mask = BIT(trip_cnt) - 1;
int34x_thermal_zone->aux_trip_nr = trip_cnt;
@@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
0, 0);
if (IS_ERR(int34x_thermal_zone->zone)) {
ret = PTR_ERR(int34x_thermal_zone->zone);
- goto free_lpat;
+ goto err_thermal_zone;
}
return int34x_thermal_zone;
-free_lpat:
+err_thermal_zone:
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
-free_mem:
+ kfree(int34x_thermal_zone->aux_trips);
+err_trip_alloc:
kfree(int34x_thermal_zone);
return ERR_PTR(ret);
}
@@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
{
thermal_zone_device_unregister(int34x_thermal_zone->zone);
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
kfree(int34x_thermal_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6ceebd659dd4..12623bc02f46 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
+ { X86_VENDOR_INTEL, 6, 0x4d},
{ X86_VENDOR_INTEL, 6, 0x56},
{}
};
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2580a4872f90..fe4e767018c4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq) {
- int ret;
-
/*
* platform has IRQ support.
* Then, driver uses common registers
- */
-
- ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
- dev_name(dev), common);
- if (ret) {
- dev_err(dev, "irq request failed\n ");
- return ret;
- }
-
- /*
* rcar_has_irq_support() will be enabled
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
}
/* enable temperature comparation */
- if (irq)
+ if (irq) {
+ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+ dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "irq request failed\n ");
+ goto error_unregister;
+ }
+
rcar_thermal_common_write(common, ENR, enr_bits);
+ }
platform_set_drvdata(pdev, common);
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
error_unregister:
rcar_thermal_for_each_priv(priv, common) {
- thermal_zone_device_unregister(priv->zone);
if (rcar_has_irq_support(priv))
rcar_thermal_irq_disable(priv);
+ thermal_zone_device_unregister(priv->zone);
}
pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
struct rcar_thermal_priv *priv;
rcar_thermal_for_each_priv(priv, common) {
- thermal_zone_device_unregister(priv->zone);
if (rcar_has_irq_support(priv))
rcar_thermal_irq_disable(priv);
+ thermal_zone_device_unregister(priv->zone);
}
pm_runtime_put(dev);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 933cd80a6bc5..1d30b0975651 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
if (on) {
con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
interrupt_en =
(of_thermal_is_trip_valid(tz, 7)
<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
@@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
} else {
con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
interrupt_en = 0; /* Disable all interrupts */
}
- con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
@@ -918,34 +919,16 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
}
static const struct of_device_id exynos_tmu_match[] = {
- {
- .compatible = "samsung,exynos3250-tmu",
- },
- {
- .compatible = "samsung,exynos4210-tmu",
- },
- {
- .compatible = "samsung,exynos4412-tmu",
- },
- {
- .compatible = "samsung,exynos5250-tmu",
- },
- {
- .compatible = "samsung,exynos5260-tmu",
- },
- {
- .compatible = "samsung,exynos5420-tmu",
- },
- {
- .compatible = "samsung,exynos5420-tmu-ext-triminfo",
- },
- {
- .compatible = "samsung,exynos5440-tmu",
- },
- {
- .compatible = "samsung,exynos7-tmu",
- },
- {},
+ { .compatible = "samsung,exynos3250-tmu", },
+ { .compatible = "samsung,exynos4210-tmu", },
+ { .compatible = "samsung,exynos4412-tmu", },
+ { .compatible = "samsung,exynos5250-tmu", },
+ { .compatible = "samsung,exynos5260-tmu", },
+ { .compatible = "samsung,exynos5420-tmu", },
+ { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
+ { .compatible = "samsung,exynos5440-tmu", },
+ { .compatible = "samsung,exynos7-tmu", },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 48491d1a81d6..174d3bcf8bd7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev,
return sprintf(buf, "%d\n", instance->trip);
}
+static struct attribute *cooling_device_attrs[] = {
+ &dev_attr_cdev_type.attr,
+ &dev_attr_max_state.attr,
+ &dev_attr_cur_state.attr,
+ NULL,
+};
+
+static const struct attribute_group cooling_device_attr_group = {
+ .attrs = cooling_device_attrs,
+};
+
+static const struct attribute_group *cooling_device_attr_groups[] = {
+ &cooling_device_attr_group,
+ NULL,
+};
+
/* Device management */
/**
@@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->ops = ops;
cdev->updated = false;
cdev->device.class = &thermal_class;
+ cdev->device.groups = cooling_device_attr_groups;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
result = device_register(&cdev->device);
@@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(result);
}
- /* sys I/F */
- if (type) {
- result = device_create_file(&cdev->device, &dev_attr_cdev_type);
- if (result)
- goto unregister;
- }
-
- result = device_create_file(&cdev->device, &dev_attr_max_state);
- if (result)
- goto unregister;
-
- result = device_create_file(&cdev->device, &dev_attr_cur_state);
- if (result)
- goto unregister;
-
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
list_add(&cdev->node, &thermal_cdev_list);
@@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np,
bind_cdev(cdev);
return cdev;
-
-unregister:
- release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
- device_unregister(&cdev->device);
- return ERR_PTR(result);
}
/**
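The error unwinding disappears above because attributes hung off dev->groups are created by device_register() itself and removed on unregister, so the driver no longer creates or cleans up sysfs files by hand. A generic sketch of that pattern with a single hypothetical read-only attribute (the hunk above open-codes the group arrays; the ATTRIBUTE_GROUPS() helper used here is the shorthand form):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "demo\n");
}
static DEVICE_ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);

static int demo_register(struct device *dev)
{
	dev->groups = demo_groups;	/* must be set before device_register() */
	return device_register(dev);	/* creates, and later removes, the files */
}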
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 634b6ce0e63a..62a5d449c388 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
{
int i;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 3fb054a10f6a..a38c1756442a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- if (data && data->cool_dev)
+ if (data)
cpufreq_cooling_unregister(data->cool_dev);
return 0;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 846552bff67d..4e959c43f680 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -47,6 +47,7 @@
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
+#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/ioctls.h>
@@ -173,6 +174,12 @@ struct atmel_uart_port {
bool ms_irq_enabled;
bool is_usart; /* usart or uart */
struct timer_list uart_timer; /* uart timer */
+
+ bool suspended;
+ unsigned int pending;
+ unsigned int pending_status;
+ spinlock_t lock_suspended;
+
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- unsigned int status, pending, pass_counter = 0;
+ unsigned int status, pending, mask, pass_counter = 0;
bool gpio_handled = false;
+ spin_lock(&atmel_port->lock_suspended);
+
do {
status = atmel_get_lines_status(port);
- pending = status & UART_GET_IMR(port);
+ mask = UART_GET_IMR(port);
+ pending = status & mask;
if (!gpio_handled) {
/*
* Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
if (!pending)
break;
+ if (atmel_port->suspended) {
+ atmel_port->pending |= pending;
+ atmel_port->pending_status = status;
+ UART_PUT_IDR(port, mask);
+ pm_system_wakeup();
+ break;
+ }
+
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
+ spin_unlock(&atmel_port->lock_suspended);
+
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port)
/*
* Allocate the IRQ
*/
- retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
+ retval = request_irq(port->irq, atmel_interrupt,
+ IRQF_SHARED | IRQF_COND_SUSPEND,
tty ? tty->name : "atmel_serial", port);
if (retval) {
dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
/* we can not wake up if we're running on slow clock */
atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
- if (atmel_serial_clk_will_stop())
+ if (atmel_serial_clk_will_stop()) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+ atmel_port->suspended = true;
+ spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
device_set_wakeup_enable(&pdev->dev, 0);
+ }
uart_suspend_port(&atmel_uart, port);
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+ if (atmel_port->pending) {
+ atmel_handle_receive(port, atmel_port->pending);
+ atmel_handle_status(port, atmel_port->pending,
+ atmel_port->pending_status);
+ atmel_handle_transmit(port, atmel_port->pending);
+ atmel_port->pending = 0;
+ }
+ atmel_port->suspended = false;
+ spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
uart_resume_port(&atmel_uart, port);
device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
port->backup_imr = 0;
port->uart.line = ret;
+ spin_lock_init(&port->lock_suspended);
+
ret = atmel_init_gpios(port, &pdev->dev);
if (ret < 0)
dev_err(&pdev->dev, "%s",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index afa06d28725d..2bbfc25e582c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
* TODO: support TSO.
*/
iov_iter_advance(&msg.msg_iter, vhost_hlen);
- } else {
- /* It'll come from socket; we'll need to patch
- * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
- */
- iov_iter_advance(&fixup, sizeof(hdr));
}
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
continue;
}
/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
- if (unlikely(vhost_hlen) &&
- copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
- vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
- vq->iov->iov_base);
- break;
+ if (unlikely(vhost_hlen)) {
+ if (copy_to_iter(&hdr, sizeof(hdr),
+ &fixup) != sizeof(hdr)) {
+ vq_err(vq, "Unable to write vnet_hdr "
+ "at addr %p\n", vq->iov->iov_base);
+ break;
+ }
+ } else {
+ /* Header came from socket; we'll need to patch
+ * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+ */
+ iov_iter_advance(&fixup, sizeof(hdr));
}
/* TODO: Should check and handle checksum. */
num_buffers = cpu_to_vhost16(vq, headcount);
if (likely(mergeable) &&
- copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+ copy_to_iter(&num_buffers, sizeof num_buffers,
+ &fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
break;
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 6df940528fd2..1443b3c391de 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
err = request_irq(wdt->irq, wdt_interrupt,
- IRQF_SHARED | IRQF_IRQPOLL,
+ IRQF_SHARED | IRQF_IRQPOLL |
+ IRQF_NO_SUSPEND,
pdev->name, wdt);
if (err)
return err;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 2140398a2a8c..2ccd3592d41f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o balloon.o manage.o
+obj-y += grant-table.o features.o balloon.o manage.o preempt.o
obj-y += events/
obj-y += xenbus/
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644
index 000000000000..a1800c150839
--- /dev/null
+++ b/drivers/xen/preempt.c
@@ -0,0 +1,44 @@
+/*
+ * Preemptible hypercalls
+ *
+ * Copyright (C) 2014 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <xen/xen-ops.h>
+
+#ifndef CONFIG_PREEMPT
+
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+asmlinkage __visible void xen_maybe_preempt_hcall(void)
+{
+ if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+ && should_resched())) {
+ /*
+ * Clear flag as we may be rescheduled on a different
+ * cpu.
+ */
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+ _cond_resched();
+ __this_cpu_write(xen_in_preemptible_hcall, true);
+ }
+}
+#endif /* CONFIG_PREEMPT */
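The per-CPU flag defined here is toggled around each privcmd hypercall (see the privcmd.c hunk below). The begin/end brackets themselves are declared elsewhere (xen-ops.h); presumably, when full preemption is disabled, they amount to little more than the following — shown as an assumption, not quoted from this patch:

#include <linux/percpu.h>

/* assumed shape of the helpers used by privcmd_ioctl_hypercall() */
DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}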
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 569a13b9e856..59ac71c4a043 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata)
if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
+ xen_preemptible_hcall_begin();
ret = privcmd_call(hypercall.op,
hypercall.arg[0], hypercall.arg[1],
hypercall.arg[2], hypercall.arg[3],
hypercall.arg[4]);
+ xen_preemptible_hcall_end();
return ret;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 61653a03a8f5..9faca6a60bb0 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
struct vscsiif_back_ring *ring = &info->ring;
- struct vscsiif_request *ring_req;
+ struct vscsiif_request ring_req;
struct vscsibk_pend *pending_req;
RING_IDX rc, rp;
int err, more_to_do;
uint32_t result;
- uint8_t act;
rc = ring->req_cons;
rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (!pending_req)
return 1;
- ring_req = RING_GET_REQUEST(ring, rc);
+ ring_req = *RING_GET_REQUEST(ring, rc);
ring->req_cons = ++rc;
- act = ring_req->act;
- err = prepare_pending_reqs(info, ring_req, pending_req);
+ err = prepare_pending_reqs(info, &ring_req, pending_req);
if (err) {
switch (err) {
case -ENODEV:
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
return 1;
}
- switch (act) {
+ switch (ring_req.act) {
case VSCSIIF_ACT_SCSI_CDB:
- if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+ if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
break;
case VSCSIIF_ACT_SCSI_ABORT:
scsiback_device_action(pending_req, TMR_ABORT_TASK,
- ring_req->ref_rqid);
+ ring_req.ref_rqid);
break;
case VSCSIIF_ACT_SCSI_RESET:
scsiback_device_action(pending_req, TMR_LUN_RESET, 0);