Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bluetooth/btqca.c | 110
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c | 16
-rw-r--r--  drivers/char/tpm/eventlog/efi.c | 13
-rw-r--r--  drivers/char/tpm/eventlog/of.c | 3
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-bdp.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-img.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-vdec.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 16
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 19
-rw-r--r--  drivers/edac/qcom_edac.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 9
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h | 1
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c | 22
-rw-r--r--  drivers/gpu/drm/tidss/tidss_irq.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-npcm7xx.c | 7
-rw-r--r--  drivers/idle/intel_idle.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 4
-rw-r--r--  drivers/md/md-bitmap.c | 34
-rw-r--r--  drivers/md/md-bitmap.h | 9
-rw-r--r--  drivers/md/md-cluster.c | 34
-rw-r--r--  drivers/md/md.c | 33
-rw-r--r--  drivers/media/cec/platform/stm32/stm32-cec.c | 9
-rw-r--r--  drivers/media/i2c/ad5820.c | 18
-rw-r--r--  drivers/media/i2c/imx274.c | 5
-rw-r--r--  drivers/media/i2c/tc358743.c | 9
-rw-r--r--  drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c | 5
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c | 2
-rw-r--r--  drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c | 9
-rw-r--r--  drivers/media/platform/samsung/exynos4-is/media-dev.c | 4
-rw-r--r--  drivers/media/platform/st/stm32/stm32-dcmi.c | 27
-rw-r--r--  drivers/media/platform/ti/omap3isp/isp.c | 3
-rw-r--r--  drivers/media/platform/xilinx/xilinx-csi2rxss.c | 8
-rw-r--r--  drivers/media/rc/gpio-ir-recv.c | 10
-rw-r--r--  drivers/media/rc/gpio-ir-tx.c | 9
-rw-r--r--  drivers/media/rc/ir-rx51.c | 9
-rw-r--r--  drivers/media/usb/uvc/uvc_ctrl.c | 99
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c | 35
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 10
-rw-r--r--  drivers/mtd/nand/raw/cadence-nand-controller.c | 42
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 100
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 85
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
-rw-r--r--  drivers/net/geneve.c | 16
-rw-r--r--  drivers/net/gtp.c | 5
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 24
-rw-r--r--  drivers/net/loopback.c | 14
-rw-r--r--  drivers/net/usb/gl620a.c | 4
-rw-r--r--  drivers/nvme/host/ioctl.c | 3
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 5
-rw-r--r--  drivers/phy/samsung/phy-exynos5-usbdrd.c | 12
-rw-r--r--  drivers/phy/tegra/xusb-tegra186.c | 11
-rw-r--r--  drivers/power/supply/da9150-fg.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 22
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/soc/mediatek/mtk-devapc.c | 36
-rw-r--r--  drivers/spi/atmel-quadspi.c | 172
-rw-r--r--  drivers/tee/optee/supp.c | 35
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 2
-rw-r--r--  drivers/usb/gadget/udc/core.c | 11
78 files changed, 968 insertions, 406 deletions
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 35fb26cbf229..892e2540f008 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -289,6 +289,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return 0;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* nvm file name has an extension, replace with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
@@ -586,6 +619,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -722,21 +768,38 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set the default value to variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
+
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
@@ -819,14 +882,20 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Give the controller some time to get ready to receive the NVM */
msleep(10);
- if (soc_type == QCA_QCA2066)
+ if (soc_type == QCA_QCA2066 || soc_type == QCA_WCN7850)
qca_read_fw_board_id(hdev, &boardid);
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
case QCA_WCN3990:
@@ -845,8 +914,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -857,14 +927,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtnv%02x.bin", rom_ver);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index cd266021d010..1a5644051d31 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -14,6 +14,7 @@
* Access to the event log extended by the TCG BIOS of PC platform
*/
+#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -62,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -135,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmalloc(len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +167,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- kfree(log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index e6cb9d525e30..4e9d7c2bf32e 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -6,6 +6,7 @@
* Thiebaud Weksteen <tweek@google.com>
*/
+#include <linux/device.h>
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
@@ -55,7 +56,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL);
if (!log->bios_event_log) {
ret = -ENOMEM;
goto out;
@@ -76,7 +77,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
@@ -91,11 +92,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
* Allocate memory for the 'combined log' where we will append the
* 'final events log' to.
*/
- tmp = krealloc(log->bios_event_log,
- log_size + final_events_log_size,
- GFP_KERNEL);
+ tmp = devm_krealloc(&chip->dev, log->bios_event_log,
+ log_size + final_events_log_size,
+ GFP_KERNEL);
if (!tmp) {
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index a9ce66d09a75..741ab2204b11 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -10,6 +10,7 @@
* Read the event log created by the firmware on PPC64
*/
+#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/tpm_eventlog.h>
@@ -65,7 +66,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
return -EIO;
}
- log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index c0759d49fd14..916ee815b140 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -267,7 +267,6 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
- kfree(chip->log.bios_event_log);
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
kfree(chip->allocated_banks);
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index b0f057207945..d2647ea58ae2 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index eb172473f075..569b6d3607dd 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 0f07c5d731df..fdd2645c167f 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 9dbfc11d5c59..7b1ad73309b1 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -21,6 +21,22 @@
#include "clk-gate.h"
#include "clk-mux.h"
+const struct mtk_gate_regs cg_regs_dummy = { 0, 0, 0 };
+EXPORT_SYMBOL_GPL(cg_regs_dummy);
+
+static int mtk_clk_dummy_enable(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void mtk_clk_dummy_disable(struct clk_hw *hw) { }
+
+const struct clk_ops mtk_clk_dummy_ops = {
+ .enable = mtk_clk_dummy_enable,
+ .disable = mtk_clk_dummy_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_dummy_ops);
+
static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
unsigned int clk_num)
{
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 65c24ab6c947..04f371730ee3 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -22,6 +22,25 @@
struct platform_device;
+/*
+ * We need the clock IDs to start from zero but to maintain devicetree
+ * backwards compatibility we can't change bindings to start from zero.
+ * Only a few platforms are affected, so we solve issues given by the
+ * commonized MTK clocks probe function(s) by adding a dummy clock at
+ * the beginning where needed.
+ */
+#define CLK_DUMMY 0
+
+extern const struct clk_ops mtk_clk_dummy_ops;
+extern const struct mtk_gate_regs cg_regs_dummy;
+
+#define GATE_DUMMY(_id, _name) { \
+ .id = _id, \
+ .name = _name, \
+ .regs = &cg_regs_dummy, \
+ .ops = &mtk_clk_dummy_ops, \
+ }
+
struct mtk_fixed_clk {
int id;
const char *name;
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 518092d7eaf7..303d0871184a 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 19f543ba7205..56e24b050eec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -885,6 +885,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -906,6 +907,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+ }
}
/**
@@ -921,6 +928,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -941,4 +949,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 3eb8794807d2..cb4b17c3b694 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -51,7 +51,8 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 4d2590964a20..75e44d8a7b40 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1862,19 +1862,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@@ -2439,10 +2441,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@@ -2654,11 +2657,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@@ -2816,11 +2820,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
-
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index a24f3b35ae91..a75c04d510fd 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3056,6 +3056,7 @@ static int kv_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3063,6 +3064,8 @@ static int kv_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3080,32 +3083,42 @@ static int kv_dpm_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(void *handle)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
static bool kv_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 291223ea7ba7..2fd97f5cf8f6 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1018,9 +1018,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1042,4 +1045,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index ff1032de4f76..52e4397d4a2a 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7796,6 +7796,7 @@ static int si_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7803,6 +7804,7 @@ static int si_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7820,32 +7822,44 @@ static int si_dpm_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(void *handle)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
static bool si_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index fb8d1d63407a..4b5bf23f2336 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -6691,12 +6691,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
+ struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
+ /*
+ * Any plane which is in use by the joiner needs its crtc.
+ * Pull those in first as this will not have happened yet
+ * if the plane remains disabled according to uapi.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ crtc = to_intel_crtc(plane_state->hw.crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ /* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->bigjoiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index ba8f2ba04629..5f8345016ffe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2059,6 +2059,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
}
+ if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
+ phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index c8f14555834a..70a6dfe94faa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -46,6 +46,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+ bool input_10_bits = dsc->bits_per_component == 10;
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
@@ -62,7 +63,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
- data |= dsc->bits_per_component;
+ data |= input_10_bits;
DPU_REG_WRITE(c, DSC_ENC, data);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index be6674fb1af7..d0bff13ac758 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -592,6 +592,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
+ struct folio *folio;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
@@ -618,12 +619,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = -EINVAL;
goto out;
}
+ folio = page_folio(page);
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(&notifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
+
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Map the page on the GPU. */
@@ -639,8 +644,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
mmu_interval_notifier_remove(&notifier->notifier);
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index a7f2b7f66a17..9ec9c43971df 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -396,7 +396,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
- (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
break;
usleep_range(1000, 2000);
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
index 2eaca54636f3..1f1eb46c721f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
@@ -141,7 +141,6 @@
#define CLOCKSET1 0x101c
#define CLOCKSET1_LOCK_PHY (1 << 17)
-#define CLOCKSET1_LOCK (1 << 16)
#define CLOCKSET1_CLKSEL (1 << 8)
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index c986d432af50..38b2ae0d7ec1 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -2655,14 +2655,32 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
+/*
+ * K2G display controller does not support soft reset, so we do a basic manual
+ * reset here: make sure the IRQs are masked and VPs are disabled.
+ */
+static void dispc_softreset_k2g(struct dispc_device *dispc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc->tidss->wait_lock, flags);
+ dispc_set_irqenable(dispc, 0);
+ dispc_read_and_clear_irqstatus(dispc);
+ spin_unlock_irqrestore(&dispc->tidss->wait_lock, flags);
+
+ for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
static int dispc_softreset(struct dispc_device *dispc)
{
u32 val;
int ret = 0;
- /* K2G display controller does not support soft reset */
- if (dispc->feat->subrev == DISPC_K2G)
+ if (dispc->feat->subrev == DISPC_K2G) {
+ dispc_softreset_k2g(dispc);
return 0;
+ }
/* Soft reset */
REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 0c681c7600bc..f13c7e434f8e 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -60,7 +60,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
unsigned int id;
dispc_irq_t irqstatus;
+ spin_lock(&tidss->wait_lock);
irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+ spin_unlock(&tidss->wait_lock);
for (id = 0; id < tidss->num_crtcs; id++) {
struct drm_crtc *crtc = tidss->crtcs[id];
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 767dd15b3c88..0947e3d155c5 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2335,6 +2335,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * Disable the interrupt to avoid the interrupt handler being triggered
+ * incorrectly by the asynchronous interrupt status since the machine
+ * might do a warm reset during the last smbus/i2c transfer session.
+ */
+ npcm_i2c_int_enable(bus, false);
+
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index bb3d10099ba4..5d7bbccb52b5 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,6 +56,7 @@
#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -1583,6 +1584,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle");
+
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 3e1272695d99..9915504ad1e1 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -444,6 +444,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
int err;
if (!counter->id) {
@@ -458,6 +459,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -467,8 +469,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_ib_counter_dealloc(counter);
- counter->id = 0;
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8d132b726c64..d782a494abcd 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4466,6 +4466,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -4955,7 +4957,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
}
if (qp_attr_mask & IB_QP_PORT)
- qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ qp_attr->port_num = mqp->port;
if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 9d8ac04c2346..e18e21b24210 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2022,33 +2022,29 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
}
EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
-
-void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats)
{
- unsigned long chunk_kb;
struct bitmap_counts *counts;
+ bitmap_super_t *sb;
if (!bitmap)
- return;
+ return -ENOENT;
+ if (bitmap->mddev->bitmap_info.external)
+ return -ENOENT;
+ if (!bitmap->storage.sb_page) /* no superblock */
+ return -EINVAL;
+ sb = kmap_local_page(bitmap->storage.sb_page);
+ stats->sync_size = le64_to_cpu(sb->sync_size);
+ kunmap_local(sb);
counts = &bitmap->counts;
+ stats->missing_pages = counts->missing_pages;
+ stats->pages = counts->pages;
+ stats->file = bitmap->storage.file;
- chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
- seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
- "%lu%s chunk",
- counts->pages - counts->missing_pages,
- counts->pages,
- (counts->pages - counts->missing_pages)
- << (PAGE_SHIFT - 10),
- chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
- chunk_kb ? "KB" : "B");
- if (bitmap->storage.file) {
- seq_printf(seq, ", file: ");
- seq_file_path(seq, bitmap->storage.file, " \t\n");
- }
-
- seq_printf(seq, "\n");
+ return 0;
}
+EXPORT_SYMBOL_GPL(md_bitmap_get_stats);
int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init)
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 3a4750952b3a..7b7a701f74be 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -233,6 +233,13 @@ struct bitmap {
int cluster_slot; /* Slot offset for clustered env */
};
+struct md_bitmap_stats {
+ unsigned long missing_pages;
+ unsigned long sync_size;
+ unsigned long pages;
+ struct file *file;
+};
+
/* the bitmap API */
/* these are used only by md/bitmap */
@@ -243,7 +250,7 @@ void md_bitmap_destroy(struct mddev *mddev);
void md_bitmap_print_sb(struct bitmap *bitmap);
void md_bitmap_update_sb(struct bitmap *bitmap);
-void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
+int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats);
int md_bitmap_setallbits(struct bitmap *bitmap);
void md_bitmap_write_all(struct bitmap *bitmap);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 10e0c5381d01..7484bb83171a 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1185,18 +1185,21 @@ out:
*/
static int cluster_check_sync_size(struct mddev *mddev)
{
- int i, rv;
- bitmap_super_t *sb;
- unsigned long my_sync_size, sync_size = 0;
- int node_num = mddev->bitmap_info.nodes;
int current_slot = md_cluster_ops->slot_number(mddev);
+ int node_num = mddev->bitmap_info.nodes;
struct bitmap *bitmap = mddev->bitmap;
- char str[64];
struct dlm_lock_resource *bm_lockres;
+ struct md_bitmap_stats stats;
+ unsigned long sync_size = 0;
+ unsigned long my_sync_size;
+ char str[64];
+ int i, rv;
- sb = kmap_atomic(bitmap->storage.sb_page);
- my_sync_size = sb->sync_size;
- kunmap_atomic(sb);
+ rv = md_bitmap_get_stats(bitmap, &stats);
+ if (rv)
+ return rv;
+
+ my_sync_size = stats.sync_size;
for (i = 0; i < node_num; i++) {
if (i == current_slot)
@@ -1225,15 +1228,18 @@ static int cluster_check_sync_size(struct mddev *mddev)
md_bitmap_update_sb(bitmap);
lockres_free(bm_lockres);
- sb = kmap_atomic(bitmap->storage.sb_page);
- if (sync_size == 0)
- sync_size = sb->sync_size;
- else if (sync_size != sb->sync_size) {
- kunmap_atomic(sb);
+ rv = md_bitmap_get_stats(bitmap, &stats);
+ if (rv) {
+ md_bitmap_free(bitmap);
+ return rv;
+ }
+
+ if (sync_size == 0) {
+ sync_size = stats.sync_size;
+ } else if (sync_size != stats.sync_size) {
md_bitmap_free(bitmap);
return -1;
}
- kunmap_atomic(sb);
md_bitmap_free(bitmap);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 297c86f5c70b..5e2751d42f64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8318,6 +8318,33 @@ static void md_seq_stop(struct seq_file *seq, void *v)
mddev_put(mddev);
}
+static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
+{
+ struct md_bitmap_stats stats;
+ unsigned long used_pages;
+ unsigned long chunk_kb;
+ int err;
+
+ err = md_bitmap_get_stats(mddev->bitmap, &stats);
+ if (err)
+ return;
+
+ chunk_kb = mddev->bitmap_info.chunksize >> 10;
+ used_pages = stats.pages - stats.missing_pages;
+
+ seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
+ used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10),
+ chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
+ chunk_kb ? "KB" : "B");
+
+ if (stats.file) {
+ seq_puts(seq, ", file: ");
+ seq_file_path(seq, stats.file, " \t\n");
+ }
+
+ seq_putc(seq, '\n');
+}
+
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
@@ -8341,6 +8368,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
return 0;
}
+ /* prevent bitmap to be freed after checking */
+ mutex_lock(&mddev->bitmap_info.mutex);
+
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
@@ -8406,11 +8436,12 @@ static int md_seq_show(struct seq_file *seq, void *v)
} else
seq_printf(seq, "\n ");
- md_bitmap_status(seq, mddev->bitmap);
+ md_bitmap_status(seq, mddev);
seq_printf(seq, "\n");
}
spin_unlock(&mddev->lock);
+ mutex_unlock(&mddev->bitmap_info.mutex);
return 0;
}
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index 40db7911b437..7b2db46a5722 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -288,12 +288,9 @@ static int stm32_cec_probe(struct platform_device *pdev)
return ret;
cec->clk_cec = devm_clk_get(&pdev->dev, "cec");
- if (IS_ERR(cec->clk_cec)) {
- if (PTR_ERR(cec->clk_cec) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Cannot get cec clock\n");
-
- return PTR_ERR(cec->clk_cec);
- }
+ if (IS_ERR(cec->clk_cec))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cec->clk_cec),
+ "Cannot get cec clock\n");
ret = clk_prepare(cec->clk_cec);
if (ret) {
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 088c29c4e252..56d22d02a0d9 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -301,21 +301,15 @@ static int ad5820_probe(struct i2c_client *client,
return -ENOMEM;
coil->vana = devm_regulator_get(&client->dev, "VANA");
- if (IS_ERR(coil->vana)) {
- ret = PTR_ERR(coil->vana);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "could not get regulator for vana\n");
- return ret;
- }
+ if (IS_ERR(coil->vana))
+ return dev_err_probe(&client->dev, PTR_ERR(coil->vana),
+ "could not get regulator for vana\n");
coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(coil->enable_gpio)) {
- ret = PTR_ERR(coil->enable_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "could not get enable gpio\n");
- return ret;
- }
+ if (IS_ERR(coil->enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(coil->enable_gpio),
+ "could not get enable gpio\n");
mutex_init(&coil->power_lock);
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index a00761b1e18c..9219f3c9594b 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -2060,9 +2060,8 @@ static int imx274_probe(struct i2c_client *client)
imx274->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(imx274->reset_gpio)) {
- if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER)
- dev_err(dev, "Reset GPIO not setup in DT");
- ret = PTR_ERR(imx274->reset_gpio);
+ ret = dev_err_probe(dev, PTR_ERR(imx274->reset_gpio),
+ "Reset GPIO not setup in DT\n");
goto err_me;
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 45dd91d1cd81..2c8189e04a13 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1891,12 +1891,9 @@ static int tc358743_probe_of(struct tc358743_state *state)
int ret;
refclk = devm_clk_get(dev, "refclk");
- if (IS_ERR(refclk)) {
- if (PTR_ERR(refclk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get refclk: %ld\n",
- PTR_ERR(refclk));
- return PTR_ERR(refclk);
- }
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
+ "failed to get refclk\n");
ep = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!ep) {
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
index 1e3833f1c9ae..ad5fab2d8bfa 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
@@ -52,9 +52,8 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
comp->clk[i] = of_clk_get(node, i);
if (IS_ERR(comp->clk[i])) {
- if (PTR_ERR(comp->clk[i]) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock\n");
- ret = PTR_ERR(comp->clk[i]);
+ ret = dev_err_probe(dev, PTR_ERR(comp->clk[i]),
+ "Failed to get clock\n");
goto put_dev;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
index d8e66b645bd8..27f08b1d34d1 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
@@ -65,6 +65,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
}
fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
index 999ce7ee5fdc..6952875ca183 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
@@ -729,11 +729,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vcodec_err(inst, "fb buffer is NULL");
+ return -ENOMEM;
+ }
+
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
mtk_vcodec_debug(inst, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
index 2f3071acb9c9..98a60f01129d 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
@@ -1471,9 +1471,7 @@ static int fimc_md_probe(struct platform_device *pdev)
pinctrl = devm_pinctrl_get(dev);
if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get pinctrl: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(pinctrl), "Failed to get pinctrl\n");
goto err_clk;
}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 37458d4d9564..06be28b361f1 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -1946,12 +1946,9 @@ static int dcmi_probe(struct platform_device *pdev)
return -ENOMEM;
dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(dcmi->rstc)) {
- if (PTR_ERR(dcmi->rstc) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get reset control\n");
-
- return PTR_ERR(dcmi->rstc);
- }
+ if (IS_ERR(dcmi->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dcmi->rstc),
+ "Could not get reset control\n");
/* Get bus characteristics from devicetree */
np = of_graph_get_next_endpoint(np, NULL);
@@ -2003,20 +2000,14 @@ static int dcmi_probe(struct platform_device *pdev)
}
mclk = devm_clk_get(&pdev->dev, "mclk");
- if (IS_ERR(mclk)) {
- if (PTR_ERR(mclk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Unable to get mclk\n");
- return PTR_ERR(mclk);
- }
+ if (IS_ERR(mclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
+ "Unable to get mclk\n");
chan = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to request DMA channel: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(chan),
+ "Failed to request DMA channel\n");
dcmi->dma_max_burst = UINT_MAX;
ret = dma_get_slave_caps(chan, &caps);
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 11ae479ee89c..e7327e38482d 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1884,8 +1884,7 @@ static int isp_initialize_modules(struct isp_device *isp)
ret = omap3isp_ccp2_init(isp);
if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(isp->dev, "CCP2 initialization failed\n");
+ dev_err_probe(isp->dev, ret, "CCP2 initialization failed\n");
goto error_ccp2;
}
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index 29b53febc2e7..d8a23f18cfbc 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -976,11 +976,9 @@ static int xcsi2rxss_probe(struct platform_device *pdev)
/* Reset GPIO */
xcsi2rxss->rst_gpio = devm_gpiod_get_optional(dev, "video-reset",
GPIOD_OUT_HIGH);
- if (IS_ERR(xcsi2rxss->rst_gpio)) {
- if (PTR_ERR(xcsi2rxss->rst_gpio) != -EPROBE_DEFER)
- dev_err(dev, "Video Reset GPIO not setup in DT");
- return PTR_ERR(xcsi2rxss->rst_gpio);
- }
+ if (IS_ERR(xcsi2rxss->rst_gpio))
+ return dev_err_probe(dev, PTR_ERR(xcsi2rxss->rst_gpio),
+ "Video Reset GPIO not setup in DT\n");
ret = xcsi2rxss_parse_of(xcsi2rxss);
if (ret < 0)
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 16795e07dc10..41ef8cdba28c 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -74,13 +74,9 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
return -ENOMEM;
gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
- if (IS_ERR(gpio_dev->gpiod)) {
- rc = PTR_ERR(gpio_dev->gpiod);
- /* Just try again if this happens */
- if (rc != -EPROBE_DEFER)
- dev_err(dev, "error getting gpio (%d)\n", rc);
- return rc;
- }
+ if (IS_ERR(gpio_dev->gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpio_dev->gpiod),
+ "error getting gpio\n");
gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index d3063ddb472e..2b829c146db1 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -174,12 +174,9 @@ static int gpio_ir_tx_probe(struct platform_device *pdev)
return -ENOMEM;
gpio_ir->gpio = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(gpio_ir->gpio)) {
- if (PTR_ERR(gpio_ir->gpio) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get gpio (%ld)\n",
- PTR_ERR(gpio_ir->gpio));
- return PTR_ERR(gpio_ir->gpio);
- }
+ if (IS_ERR(gpio_ir->gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpio_ir->gpio),
+ "Failed to get gpio\n");
rcdev->priv = gpio_ir;
rcdev->driver_name = DRIVER_NAME;
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index a3b145183260..85080c3d2053 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -231,13 +231,8 @@ static int ir_rx51_probe(struct platform_device *dev)
struct rc_dev *rcdev;
pwm = pwm_get(&dev->dev, NULL);
- if (IS_ERR(pwm)) {
- int err = PTR_ERR(pwm);
-
- if (err != -EPROBE_DEFER)
- dev_err(&dev->dev, "pwm_get failed: %d\n", err);
- return err;
- }
+ if (IS_ERR(pwm))
+ return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n");
/* Use default, in case userspace does not set the carrier */
ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 1bad64f4499a..69f9f451ab40 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1470,6 +1470,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
+static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_fh *new_handle)
+{
+ lockdep_assert_held(&handle->chain->ctrl_mutex);
+
+ if (new_handle) {
+ if (ctrl->handle)
+ dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ "UVC non compliance: Setting an async control with a pending operation.");
+
+ if (new_handle == ctrl->handle)
+ return;
+
+ if (ctrl->handle) {
+ WARN_ON(!ctrl->handle->pending_async_ctrls);
+ if (ctrl->handle->pending_async_ctrls)
+ ctrl->handle->pending_async_ctrls--;
+ }
+
+ ctrl->handle = new_handle;
+ handle->pending_async_ctrls++;
+ return;
+ }
+
+ /* Cannot clear the handle for a control not owned by us.*/
+ if (WARN_ON(ctrl->handle != handle))
+ return;
+
+ ctrl->handle = NULL;
+ if (WARN_ON(!handle->pending_async_ctrls))
+ return;
+ handle->pending_async_ctrls--;
+}
+
void uvc_ctrl_status_event(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const u8 *data)
{
@@ -1480,7 +1514,8 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
mutex_lock(&chain->ctrl_mutex);
handle = ctrl->handle;
- ctrl->handle = NULL;
+ if (handle)
+ uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
s32 value = __uvc_ctrl_get_value(mapping, data);
@@ -1700,7 +1735,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
+ struct uvc_fh *handle,
+ struct uvc_entity *entity,
+ int rollback,
+ struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1748,6 +1786,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
*err_ctrl = ctrl;
return ret;
}
+
+ if (!rollback && handle &&
+ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ uvc_ctrl_set_handle(handle, ctrl, handle);
}
return 0;
@@ -1784,18 +1826,20 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
- &err_ctrl);
- if (ret < 0)
+ ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
+ rollback, &err_ctrl);
+ if (ret < 0) {
+ if (ctrls)
+ ctrls->error_idx =
+ uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ err_ctrl);
goto done;
+ }
}
if (!rollback)
uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
- if (ret < 0 && ctrls)
- ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
- err_ctrl);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1925,9 +1969,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
- if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
- ctrl->handle = handle;
-
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
@@ -2100,7 +2141,7 @@ static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry)
{
- struct uvc_entity *entity;
+ struct uvc_entity *entity, *iter;
struct uvc_control *ctrl;
unsigned int i;
bool found;
@@ -2110,16 +2151,16 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
int ret;
/* Find the extension unit. */
- found = false;
- list_for_each_entry(entity, &chain->entities, chain) {
- if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
- entity->id == xqry->unit) {
- found = true;
+ entity = NULL;
+ list_for_each_entry(iter, &chain->entities, chain) {
+ if (UVC_ENTITY_TYPE(iter) == UVC_VC_EXTENSION_UNIT &&
+ iter->id == xqry->unit) {
+ entity = iter;
break;
}
}
- if (!found) {
+ if (!entity) {
uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n",
xqry->unit);
return -ENOENT;
@@ -2256,7 +2297,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
+ ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2660,6 +2701,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
+{
+ struct uvc_entity *entity;
+
+ guard(mutex)(&handle->chain->ctrl_mutex);
+
+ if (!handle->pending_async_ctrls)
+ return;
+
+ list_for_each_entry(entity, &handle->chain->dev->entities, list) {
+ for (unsigned int i = 0; i < entity->ncontrols; ++i) {
+ if (entity->controls[i].handle != handle)
+ continue;
+ uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
+ }
+ }
+
+ WARN_ON(handle->pending_async_ctrls);
+}
+
/*
* Cleanup device controls.
*/
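The uvc_ctrl_set_handle() and uvc_ctrl_cleanup_fh() additions above maintain a single invariant: every asynchronous control whose ->handle points at a file handle accounts for exactly one unit of that handle's pending_async_ctrls, and both fields are only touched with the chain's ctrl_mutex held. A minimal standalone sketch of that ownership pattern (the names below are illustrative, not part of the UVC API):

struct owner {
	unsigned int pending;	/* number of controls currently pointing at us */
};

struct control {
	struct owner *owner;	/* NULL while no asynchronous operation is pending */
};

/* Transfer ownership of one control, or clear it when new_owner is NULL. */
static void set_owner(struct control *c, struct owner *new_owner)
{
	if (c->owner)
		c->owner->pending--;	/* previous owner loses one pending control */
	c->owner = new_owner;
	if (new_owner)
		new_owner->pending++;	/* new owner gains one pending control */
}

On file release, calling set_owner(c, NULL) for every control still owned by the departing handle is what brings pending back to zero, which is what uvc_ctrl_cleanup_fh() does for the real structures.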
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index c8e72079b427..47a6cedd5578 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1247,18 +1247,15 @@ static int uvc_gpio_parse(struct uvc_device *dev)
struct gpio_desc *gpio_privacy;
int irq;
- gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
+ gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
GPIOD_IN);
if (IS_ERR_OR_NULL(gpio_privacy))
return PTR_ERR_OR_ZERO(gpio_privacy);
irq = gpiod_to_irq(gpio_privacy);
- if (irq < 0) {
- if (irq != EPROBE_DEFER)
- dev_err(&dev->udev->dev,
- "No IRQ for privacy GPIO (%d)\n", irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(&dev->intf->dev, irq,
+ "No IRQ for privacy GPIO\n");
unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
if (!unit)
@@ -1283,15 +1280,27 @@ static int uvc_gpio_parse(struct uvc_device *dev)
static int uvc_gpio_init_irq(struct uvc_device *dev)
{
struct uvc_entity *unit = dev->gpio_unit;
+ int ret;
if (!unit || unit->gpio.irq < 0)
return 0;
- return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
- uvc_gpio_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING,
- "uvc_privacy_gpio", dev);
+ ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "uvc_privacy_gpio", dev);
+
+ unit->gpio.initialized = !ret;
+
+ return ret;
+}
+
+static void uvc_gpio_deinit(struct uvc_device *dev)
+{
+ if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized)
+ return;
+
+ free_irq(dev->gpio_unit->gpio.irq, dev);
}
/* ------------------------------------------------------------------------
@@ -1885,6 +1894,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
{
struct uvc_streaming *stream;
+ uvc_gpio_deinit(dev);
+
list_for_each_entry(stream, &dev->streams, list) {
/* Nothing to do here, continue. */
if (!video_is_registered(&stream->vdev))
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 950b42d78a10..bd4677a6e653 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -607,6 +607,8 @@ static int uvc_v4l2_release(struct file *file)
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
+ uvc_ctrl_cleanup_fh(handle);
+
/* Only free resources if this is a privileged handle. */
if (uvc_has_privileges(handle))
uvc_queue_release(&stream->queue);
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 33e7475d4e64..45caa8523426 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -227,6 +227,7 @@ struct uvc_entity {
u8 *bmControls;
struct gpio_desc *gpio_privacy;
int irq;
+ bool initialized;
} gpio;
};
@@ -330,7 +331,11 @@ struct uvc_video_chain {
struct uvc_entity *processing; /* Processing unit */
struct uvc_entity *selector; /* Selector unit */
- struct mutex ctrl_mutex; /* Protects ctrl.info */
+ struct mutex ctrl_mutex; /*
+ * Protects ctrl.info,
+ * ctrl.handle and
+ * uvc_fh.pending_async_ctrls
+ */
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
@@ -584,6 +589,7 @@ struct uvc_fh {
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
enum uvc_handle_state state;
+ unsigned int pending_async_ctrls;
};
struct uvc_driver {
@@ -768,6 +774,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
+
/* Utility functions */
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 9dac3ca69d57..7d686a96d44c 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -469,6 +469,8 @@ struct cdns_nand_ctrl {
struct {
void __iomem *virt;
dma_addr_t dma;
+ dma_addr_t iova_dma;
+ u32 size;
} io;
int irq;
@@ -1830,11 +1832,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
}
if (dir == DMA_FROM_DEVICE) {
- src_dma = cdns_ctrl->io.dma;
+ src_dma = cdns_ctrl->io.iova_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
- dst_dma = cdns_ctrl->io.dma;
+ dst_dma = cdns_ctrl->io.iova_dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
@@ -1856,12 +1858,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
@@ -2828,6 +2830,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
+ struct dma_device *dma_dev = cdns_ctrl->dmac->device;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2863,15 +2866,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
- if (!cdns_ctrl->dmac) {
- dev_err(cdns_ctrl->dev,
- "Unable to get a DMA channel\n");
- ret = -EBUSY;
+ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cdns_ctrl->dmac)) {
+ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
+ "Failed to get a DMA channel\n");
goto disable_irq;
}
}
+ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
+ cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
+
+ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
+ goto dma_release_chnl;
+ }
+
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
@@ -2882,18 +2894,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
return 0;
+unmap_dma_resource:
+ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
@@ -2915,6 +2931,8 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
@@ -2983,7 +3001,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+
cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.size = resource_size(res);
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1aa578c1ca4a..8d66de71ea60 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1271,6 +1271,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d44d53d69762..fc3342944dbc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1936,10 +1936,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -2999,6 +3001,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
if (!netif_running(bp->dev))
return nstat;
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3028,6 +3031,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -3035,12 +3039,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3090,6 +3095,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return gem_get_stats(bp);
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3123,6 +3129,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -4949,6 +4956,7 @@ static int macb_probe(struct platform_device *pdev)
bp->usrio = macb_config->usrio;
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 19edb1ba6d66..230b317d93da 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -123,6 +123,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
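The enetc_unwind_tx_frame() helper added above frees 'count' Tx ring slots, starting at the last mapped index and walking backwards, wrapping from index 0 to bd_count - 1. A small userspace sketch of the same index walk, with made-up numbers purely for illustration:

#include <stdio.h>

static void trace_unwind(int bd_count, int count, int i)
{
	/* Visit 'count' slots, starting at slot 'i' and walking backwards. */
	while (count--) {
		printf("free slot %d\n", i);
		if (i == 0)
			i = bd_count;
		i--;
	}
}

int main(void)
{
	trace_unwind(8, 4, 2);	/* visits slots 2, 1, 0, 7 */
	return 0;
}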
@@ -213,9 +231,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -229,13 +249,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum needs to be updated by
+ * software after the originTimestamp field is changed;
+ * otherwise the hardware will calculate a wrong checksum
+ * when it updates the correction field and writes it back
+ * into the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
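The inet_proto_csum_replace2()/inet_proto_csum_replace4() calls above apply the incremental update from RFC 1624: the UDP checksum is corrected from the old and new field values alone, without summing the packet again. A standalone sketch of that arithmetic for one 16-bit word (a 32-bit field is handled as two such words); this illustrates the math, not the kernel helper itself:

#include <stdint.h>

/* Fold a 32-bit accumulator into a 16-bit ones'-complement sum. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') */
static uint16_t csum_replace16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;	/* remove the old word */
	sum += new_val;			/* account for the new word */
	return (uint16_t)~csum_fold32(sum);
}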
@@ -306,25 +351,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -367,7 +407,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -499,9 +542,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -525,8 +568,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
@@ -555,13 +603,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -1581,7 +1623,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6d17738c1c53..44991cae9404 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -116,6 +116,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -2135,7 +2136,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff = &tx_pool->tx_buff[index];
adapter->netdev->stats.tx_packets--;
adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
- adapter->tx_stats_buffers[queue_num].packets--;
+ adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
dev_kfree_skb_any(tx_buff->skb);
@@ -2160,8 +2161,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
}
}
+static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 *entry)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ dma_wmb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
+ cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
+
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
+
+ return rc;
+}
+
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *tx_scrq)
+ struct ibmvnic_sub_crq_queue *tx_scrq,
+ bool indirect)
{
struct ibmvnic_ind_xmit_queue *ind_bufp;
u64 dma_addr;
@@ -2176,12 +2198,18 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
if (!entries)
return 0;
- rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+
+ if (indirect)
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ else
+ rc = send_subcrq_direct(adapter, handle,
+ (u64 *)ind_bufp->indir_arr);
+
if (rc)
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
ind_bufp->index = 0;
- return 0;
+ return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -2200,11 +2228,13 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_map_failed = 0;
union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
- unsigned int tx_packets = 0;
+ unsigned int tx_dpackets = 0;
+ unsigned int tx_bpackets = 0;
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
int num_entries = 1;
@@ -2234,7 +2264,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2249,8 +2281,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2303,6 +2337,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2346,6 +2381,17 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
+ } else if (!ind_bufp->index && !netdev_xmit_more()) {
+ ind_bufp->indir_arr[0] = tx_crq;
+ ind_bufp->index = 1;
+ tx_buff->num_entries = 1;
+ netdev_tx_sent_queue(txq, skb->len);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
+
+ tx_dpackets++;
+ goto early_exit;
}
if ((*hdrs >> 7) & 1)
@@ -2355,7 +2401,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
}
@@ -2363,23 +2409,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
indir_arr[0] = tx_crq;
memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
num_entries * sizeof(struct ibmvnic_generic_scrq));
+
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
}
+ tx_bpackets++;
+
+early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
- tx_packets++;
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2408,10 +2457,11 @@ out:
rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
+ netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
- adapter->tx_stats_buffers[queue_num].packets += tx_packets;
+ adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
+ adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
@@ -3577,7 +3627,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i);
+ data += ETH_GSTRING_LEN;
+
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_direct_packets", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
@@ -3642,7 +3695,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
(adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
- data[i] = adapter->tx_stats_buffers[j].packets;
+ data[i] = adapter->tx_stats_buffers[j].batched_packets;
+ i++;
+ data[i] = adapter->tx_stats_buffers[j].direct_packets;
i++;
data[i] = adapter->tx_stats_buffers[j].bytes;
i++;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index e5c6ff3d0c47..f923cdab03f5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -213,7 +213,8 @@ struct ibmvnic_statistics {
-#define NUM_TX_STATS 3
+#define NUM_TX_STATS 4
struct ibmvnic_tx_queue_stats {
- u64 packets;
+ u64 batched_packets;
+ u64 direct_packets;
u64 bytes;
u64 dropped_packets;
};
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739..d2757cc11613 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index a6d3fc96e168..10b9dc2aaf06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -513,7 +513,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f53f84bd03db..cd01ed9dcf2f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2159,6 +2159,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 27b570678c9f..3dd5c69b05cb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1965,21 +1965,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
- struct net_device *dev, *aux;
- /* gather any geneve devices that were moved into this ns */
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, head);
-
- /* now gather any other geneve devices that were created in this ns */
- list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
- /* If geneve->dev is in the same netns, it was already added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, head);
- }
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
+ geneve_dellink(geneve->dev, head);
}
static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 0de3dcd07cb7..797886f10868 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1894,11 +1894,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp, *gtp_next;
- struct net_device *dev;
-
- for_each_netdev(net, dev)
- if (dev->rtnl_link_ops == &gtp_link_ops)
- gtp_dellink(dev, dev_to_kill);
list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 1d49771d07f4..eea81a733405 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,6 +2,9 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
+#include <net/inet_dscp.h>
+#include <net/ip.h>
+
#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;
@@ -413,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -485,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63ef4e..292cd8a113f4 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -246,8 +246,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a02873792890..acf73a91e87e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -262,8 +262,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
{
if (ns && nsid != ns->head->ns_id) {
dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u)"
- "of namespace\n",
+ "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, nsid, ns->head->ns_id);
return false;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index d97a7164c496..2c73cc8dd1ed 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -299,7 +299,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
- priv->phy_rst = devm_reset_control_get(dev, "phy");
+ priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
+ /* fallback to old behaviour */
+ if (PTR_ERR(priv->phy_rst) == -ENOENT)
+ priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(priv->phy_rst))
return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index ee0848fe8432..ca6101ef6076 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -288,9 +288,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* FSEL settings corresponding to reference clock */
- reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
@@ -332,9 +332,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
- reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
return reg;
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 0996ede63387..0556a57f3ddf 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -900,6 +900,7 @@ static int tegra186_utmi_phy_exit(struct phy *phy)
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
+ u32 reg;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
@@ -907,6 +908,16 @@ static int tegra186_utmi_phy_exit(struct phy *phy)
return -ENODEV;
}
+ if (port->mode == USB_DR_MODE_OTG ||
+ port->mode == USB_DR_MODE_PERIPHERAL) {
+ /* reset VBUS&ID OVERRIDE */
+ reg = padctl_readl(padctl, USB2_VBUS_ID);
+ reg &= ~VBUS_OVERRIDE;
+ reg &= ~ID_OVERRIDE(~0);
+ reg |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, reg, USB2_VBUS_ID);
+ }
+
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_disable(port->supply);
if (err) {
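The USB2_VBUS_ID update above relies on a common register idiom: passing ~0 through the field macro produces the field's full mask, so 'reg &= ~ID_OVERRIDE(~0)' clears the whole ID override field before 'reg |= ID_OVERRIDE_FLOATING' programs the new value. A runnable sketch of the idiom with a made-up 4-bit field at bits 7:4 (the macro and values below are illustrative, not the Tegra register layout):

#include <stdio.h>
#include <stdint.h>

#define ID_FIELD(x)		(((uint32_t)(x) & 0xf) << 4)	/* bits 7:4 */
#define ID_FIELD_FLOATING	ID_FIELD(0x8)

int main(void)
{
	uint32_t reg = 0xabcd;

	reg &= ~ID_FIELD(~0);		/* clear bits 7:4    -> 0xab0d */
	reg |= ID_FIELD_FLOATING;	/* program new value -> 0xab8d */
	printf("reg = 0x%04x\n", (unsigned int)reg);
	return 0;
}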
diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 8c5e2c49d6c1..9d0a6ab698f5 100644
--- a/drivers/power/supply/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
@@ -248,9 +248,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
DA9150_QIF_SD_GAIN_SIZE);
da9150_fg_read_sync_end(fg);
- div = (u64) (sd_gain * shunt_val * 65536ULL);
+ div = 65536ULL * sd_gain * shunt_val;
do_div(div, 1000000);
- res = (u64) (iavg * 1000000ULL);
+ res = 1000000ULL * iavg;
do_div(res, div);
val->intval = (int) res;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5c5954b78585..8e75eb1b6eab 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -782,12 +782,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
- case 0x24: /* depopulation in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
action = ACTION_DELAYED_REPREP;
break;
+ /*
+ * Depopulation might take many hours,
+ * thus it is not worthwhile to retry.
+ */
+ case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
+ fallthrough;
default:
action = ACTION_FAIL;
break;
@@ -1573,13 +1579,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Only clear the driver-private command data if the LLD does not supply
- * a function to initialize that data.
- */
- if (!shost->hostt->init_cmd_priv)
- memset(cmd + 1, 0, shost->hostt->cmd_size);
-
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1741,6 +1740,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b35ef52d9c63..c3006524eb03 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2191,6 +2191,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* unavailable */
if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
break; /* sanitize in progress */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x24)
+ break; /* depopulation in progress */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x25)
+ break; /* depopulation restoration in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index fc13334db1b1..7269ab8d29b6 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -273,44 +273,44 @@ static int mtk_devapc_probe(struct platform_device *pdev)
return -EINVAL;
devapc_irq = irq_of_parse_and_map(node, 0);
- if (!devapc_irq)
- return -EINVAL;
-
- ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock");
- if (IS_ERR(ctx->infra_clk))
- return -EINVAL;
+ if (!devapc_irq) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (clk_prepare_enable(ctx->infra_clk))
- return -EINVAL;
+ ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
+ if (IS_ERR(ctx->infra_clk)) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
- if (ret) {
- clk_disable_unprepare(ctx->infra_clk);
- return ret;
- }
+ if (ret)
+ goto err;
platform_set_drvdata(pdev, ctx);
start_devapc(ctx);
return 0;
+
+err:
+ iounmap(ctx->infra_base);
+ return ret;
}
-static int mtk_devapc_remove(struct platform_device *pdev)
+static void mtk_devapc_remove(struct platform_device *pdev)
{
struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
stop_devapc(ctx);
-
- clk_disable_unprepare(ctx->infra_clk);
-
- return 0;
+ iounmap(ctx->infra_base);
}
static struct platform_driver mtk_devapc_driver = {
.probe = mtk_devapc_probe,
- .remove = mtk_devapc_remove,
+ .remove_new = mtk_devapc_remove,
.driver = {
.name = "mtk-devapc",
.of_match_table = mtk_devapc_dt_match,
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index b5afe5790b1d..8540fcdf3b42 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -138,11 +138,15 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+#define ATMEL_QSPI_TIMEOUT 1000 /* ms */
+
struct atmel_qspi_caps {
bool has_qspick;
bool has_ricr;
};
+struct atmel_qspi_ops;
+
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
@@ -150,13 +154,22 @@ struct atmel_qspi {
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ const struct atmel_qspi_ops *ops;
resource_size_t mmap_size;
u32 pending;
+ u32 irq_mask;
u32 mr;
u32 scr;
struct completion cmd_completion;
};
+struct atmel_qspi_ops {
+ int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
+ u32 *offset);
+ int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
+ u32 offset);
+};
+
struct atmel_qspi_mode {
u8 cmd_buswidth;
u8 addr_buswidth;
@@ -375,9 +388,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
- if (aq->mr != QSPI_MR_SMM) {
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
}
/* Clear pending interrupts */
@@ -404,10 +417,67 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
return 0;
}
+static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
+{
+ int err = 0;
+ u32 sr;
+
+ /* Poll INSTRuction End status */
+ sr = atmel_qspi_read(aq, QSPI_SR);
+ if ((sr & irq_mask) == irq_mask)
+ return 0;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & irq_mask;
+ aq->irq_mask = irq_mask;
+ atmel_qspi_write(irq_mask, aq, QSPI_IER);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
+ err = -ETIMEDOUT;
+ atmel_qspi_write(irq_mask, aq, QSPI_IDR);
+
+ return err;
+}
+
+static int atmel_qspi_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+
+ /* Skip to the final steps if there is no data */
+ if (!op->data.nbytes)
+ return atmel_qspi_wait_for_completion(aq,
+ QSPI_SR_CMD_COMPLETED);
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ rmb();
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ wmb();
+ }
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
+}
+
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
- u32 sr, offset;
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+ u32 offset;
int err;
/*
@@ -416,46 +486,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* when the flash memories overrun the controller's memory space.
*/
if (op->addr.val + op->data.nbytes > aq->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+
+ if (op->addr.nbytes > 4)
+ return -EOPNOTSUPP;
err = pm_runtime_resume_and_get(&aq->pdev->dev);
if (err < 0)
return err;
- err = atmel_qspi_set_cfg(aq, op, &offset);
+ err = aq->ops->set_cfg(aq, op, &offset);
if (err)
goto pm_runtime_put;
- /* Skip to the final steps if there is no data */
- if (op->data.nbytes) {
- /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)atmel_qspi_read(aq, QSPI_IFR);
-
- /* Send/Receive data */
- if (op->data.dir == SPI_MEM_DATA_IN)
- memcpy_fromio(op->data.buf.in, aq->mem + offset,
- op->data.nbytes);
- else
- memcpy_toio(aq->mem + offset, op->data.buf.out,
- op->data.nbytes);
-
- /* Release the chip-select */
- atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
- }
-
- /* Poll INSTRuction End status */
- sr = atmel_qspi_read(aq, QSPI_SR);
- if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
- goto pm_runtime_put;
-
- /* Wait for INSTRuction End interrupt */
- reinit_completion(&aq->cmd_completion);
- aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
- if (!wait_for_completion_timeout(&aq->cmd_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+ err = aq->ops->transfer(mem, op, offset);
pm_runtime_put:
pm_runtime_mark_last_busy(&aq->pdev->dev);
@@ -476,7 +520,7 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
static int atmel_qspi_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
u32 scbr;
@@ -501,7 +545,42 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (ret < 0)
return ret;
- aq->scr = QSPI_SCR_SCBR(scbr);
+ aq->scr &= ~QSPI_SCR_SCBR_MASK;
+ aq->scr |= QSPI_SCR_SCBR(scbr);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+ pm_runtime_mark_last_busy(ctrl->dev.parent);
+ pm_runtime_put_autosuspend(ctrl->dev.parent);
+
+ return 0;
+}
+
+static int atmel_qspi_set_cs_timing(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->controller;
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ unsigned long clk_rate;
+ u32 cs_setup;
+ int delay;
+ int ret;
+
+ delay = spi_delay_to_ns(&spi->cs_setup, NULL);
+ if (delay <= 0)
+ return delay;
+
+ clk_rate = clk_get_rate(aq->pclk);
+ if (!clk_rate)
+ return -EINVAL;
+
+ cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
+ 1000);
+
+ ret = pm_runtime_resume_and_get(ctrl->dev.parent);
+ if (ret < 0)
+ return ret;
+
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK;
+ aq->scr |= QSPI_SCR_DLYBS(cs_setup);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
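The cs_setup computation in atmel_qspi_set_cs_timing() above converts the requested chip-select setup delay from nanoseconds into peripheral-clock cycles, rounding up at each step so the programmed delay is never shorter than requested. A quick standalone check with made-up example numbers (66 MHz clock, 500 ns requested):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_rate = 66000000;	/* example peripheral clock: 66 MHz */
	unsigned long delay_ns = 500;		/* example spi->cs_setup delay */

	/* clock cycles per microsecond, rounded up: 66 */
	unsigned long cyc_per_us = DIV_ROUND_UP(clk_rate, 1000000UL);

	/* 500 ns * 66 cyc/us / 1000 = 33 cycles, rounded up */
	unsigned long cs_setup = DIV_ROUND_UP(delay_ns * cyc_per_us, 1000UL);

	printf("cs_setup = %lu cycles\n", cs_setup);	/* prints 33 */
	return 0;
}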
@@ -516,8 +595,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq)
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
@@ -536,12 +615,17 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
aq->pending |= pending;
- if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ if ((aq->pending & aq->irq_mask) == aq->irq_mask)
complete(&aq->cmd_completion);
return IRQ_HANDLED;
}
+static const struct atmel_qspi_ops atmel_qspi_ops = {
+ .set_cfg = atmel_qspi_set_cfg,
+ .transfer = atmel_qspi_transfer,
+};
+
static int atmel_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -549,12 +633,13 @@ static int atmel_qspi_probe(struct platform_device *pdev)
struct resource *res;
int irq, err = 0;
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
if (!ctrl)
return -ENOMEM;
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctrl->setup = atmel_qspi_setup;
+ ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
ctrl->mem_ops = &atmel_qspi_mem_ops;
ctrl->num_chipselect = 1;
@@ -565,6 +650,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
init_completion(&aq->cmd_completion);
aq->pdev = pdev;
+ aq->ops = &atmel_qspi_ops;
/* Map the registers */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 322a543b8c27..d0f397c90242 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
- bool interruptable;
u32 ret;
/*
@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/*
* Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(&req->c) successfully we have
- * exclusive access again.
+ * exclusive access again. Allow the wait to be killable so that
+ * it does not turn into an indefinite sleep if the supplicant
+ * hangs for some reason.
*/
- while (wait_for_completion_interruptible(&req->c)) {
+ if (wait_for_completion_killable(&req->c)) {
mutex_lock(&supp->mutex);
- interruptable = !supp->ctx;
- if (interruptable) {
- /*
- * There's no supplicant available and since the
- * supp->mutex currently is held none can
- * become available until the mutex released
- * again.
- *
- * Interrupting an RPC to supplicant is only
- * allowed as a way of slightly improving the user
- * experience in case the supplicant hasn't been
- * started yet. During normal operation the supplicant
- * will serve all requests in a timely manner and
- * interrupting then wouldn't make sense.
- */
- if (req->in_queue) {
- list_del(&req->link);
- req->in_queue = false;
- }
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
}
mutex_unlock(&supp->mutex);
-
- if (interruptable) {
- req->ret = TEEC_ERROR_COMMUNICATION;
- break;
- }
+ req->ret = TEEC_ERROR_COMMUNICATION;
}
ret = req->ret;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 1d8521103b66..dd1cfeeffb67 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -282,7 +282,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
/* Our transmit completed. See if there's more to go.
* f_midi_transmit eats req, don't queue it again. */
req->length = 0;
- f_midi_transmit(midi);
+ queue_work(system_highpri_wq, &midi->work);
return;
}
break;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 6d7f8e98ba2a..5adb6e831126 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1419,8 +1419,16 @@ int usb_add_gadget(struct usb_gadget *gadget)
if (ret)
goto err_free_id;
+ ret = sysfs_create_link(&udc->dev.kobj,
+ &gadget->dev.kobj, "gadget");
+ if (ret)
+ goto err_del_gadget;
+
return 0;
+ err_del_gadget:
+ device_del(&gadget->dev);
+
err_free_id:
ida_free(&gadget_id_numbers, gadget->id_number);
@@ -1529,8 +1537,9 @@ void usb_del_gadget(struct usb_gadget *gadget)
mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
- flush_work(&gadget->work);
+ sysfs_remove_link(&udc->dev.kobj, "gadget");
device_del(&gadget->dev);
+ flush_work(&gadget->work);
ida_free(&gadget_id_numbers, gadget->id_number);
cancel_work_sync(&udc->vbus_work);
device_unregister(&udc->dev);