F: drivers/iommu/arm/
F: drivers/iommu/io-pgtable-arm*
+ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
+M: Arnd Bergmann <arnd@arndb.de>
+M: Olof Johansson <olof@lixom.net>
+M: soc@kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
+F: arch/arm/boot/dts/Makefile
+F: arch/arm64/boot/dts/Makefile
+
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
R: John Fastabend <john.fastabend@gmail.com>
-R: KP Singh <kpsingh@chromium.org>
+R: KP Singh <kpsingh@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
X: arch/x86/net/bpf_jit_comp32.c
BPF LSM (Security Audit and Enforcement using BPF)
-M: KP Singh <kpsingh@chromium.org>
+M: KP Singh <kpsingh@kernel.org>
R: Florent Revest <revest@chromium.org>
R: Brendan Jackman <jackmanb@chromium.org>
L: bpf@vger.kernel.org
F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
+MARVELL PRESTERA ETHERNET SWITCH DRIVER
+M: Vadym Kochan <vkochan@marvell.com>
+M: Taras Chornyi <tchornyi@marvell.com>
+S: Supported
+W: https://github.com/Marvell-switching/switchdev-prestera
+F: drivers/net/ethernet/marvell/prestera/
+
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
pinctrl_i2c3: i2c3grp {
fsl,pins = <
- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
>;
};
MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
- MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
};
&ssp3 {
- /delete-property/ #address-cells;
- /delete-property/ #size-cells;
+ #address-cells = <0>;
spi-slave;
status = "okay";
ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
};
/*
- * Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2015-2020 Adam Sampson <ats@offog.org>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
/ {
model = "PineCube IP Camera";
- compatible = "pine64,pinecube", "allwinner,sun8i-s3";
+ compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3";
aliases {
serial0 = &uart2;
gic: interrupt-controller@1c81000 {
compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
- <0x01c82000 0x1000>,
+ <0x01c82000 0x2000>,
<0x01c84000 0x2000>,
<0x01c86000 0x2000>;
interrupt-controller;
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_dc1sw>;
status = "okay";
};
};
&reg_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "vcc-gmac-phy";
};
&reg_dcdc1 {
regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
};
&reg_dcdc2 {
src_np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ul-src");
- src_base = of_iomap(np, 0);
+ src_base = of_iomap(src_np, 0);
of_node_put(src_np);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
#ifndef __MEMORY_H
#define __MEMORY_H
-#define MAX_PHYSMEM_BITS 36
-#define SECTION_SIZE_BITS 34
-
#define KEYSTONE_LOW_PHYS_START 0x80000000ULL
#define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */
#define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \
"allwinner,sun8i-h2-plus",
"allwinner,sun8i-h3",
"allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
"allwinner,sun8i-v3s",
NULL,
};
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_dc1sw>;
status = "okay";
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_gmac_3v3>;
allwinner,rx-delay-ps = <200>;
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ return is_kernel_addr((unsigned long)unsafe_src);
+}
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
- mov 1, %o3
+ mov -1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
depends on ARCH_MXC || COMPILE_TEST
config MXC_CLK_SCU
- tristate "IMX SCU clock"
- depends on ARCH_MXC || COMPILE_TEST
+ tristate
+ depends on ARCH_MXC
depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
u16 sel, g1, r1, g2, r2;
} dual;
};
-} __packed;
+};
#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
{ .gate = _clk, .reset = _rst, \
#define PM_API_FEATURE_CHECK_MAX_ORDER 7
static bool feature_check_enabled;
-DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
/**
* struct pm_api_feature_data - PM API Feature data
ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
arizona_gpio);
if (ret < 0) {
+ pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
return ret;
return err;
}
+ platform_set_drvdata(pdev, gpio);
+
return 0;
}
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
- continue;
+ break;
sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sprd_eic->base[i]))
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
+ if (IS_ENABLED(CONFIG_PWM)) {
+ err = mvebu_pwm_probe(pdev, mvchip, id);
+ if (err)
+ return err;
+ }
+
/* Some gpio controllers do not provide irq support */
if (!have_irqs)
return 0;
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_pwm;
}
err = irq_alloc_domain_generic_chips(
mvchip);
}
- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM))
- return mvebu_pwm_probe(pdev, mvchip, id);
-
return 0;
err_domain:
irq_domain_remove(mvchip->domain);
+err_pwm:
+ pwmchip_remove(&mvchip->mvpwm->chip);
return err;
}
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
int ret;
- ret = pm_runtime_get_sync(chip->parent);
+ ret = pm_runtime_resume_and_get(chip->parent);
if (ret < 0)
return ret;
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
goto err_pm_dis;
*/
void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return;
+#endif
+
pinctrl_gpio_free(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
int ret;
memset(&bp, 0, sizeof(bp));
bp.type = ttm_bo_type_sg;
bp.resv = resv;
dma_resv_lock(resv, NULL);
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU,
+ 0, ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
+ bo = gem_to_amdgpu_bo(gobj);
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
- return &bo->tbo.base;
+ return gobj;
error:
dma_resv_unlock(resv);
bp.type = type;
bp.resv = resv;
bp.preferred_domain = initial_domain;
-retry:
bp.flags = flags;
bp.domain = initial_domain;
r = amdgpu_bo_create(adev, &bp, &bo);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
-
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
- }
+ if (r)
return r;
- }
+
*obj = &bo->tbo.base;
return 0;
uint64_t size = args->in.bo_size;
struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
- uint32_t handle;
+ uint32_t handle, initial_domain;
int r;
/* reject invalid gem flags */
resv = vm->root.base.bo->tbo.base.resv;
}
+retry:
+ initial_domain = (u32)(0xffffffff & args->in.domains);
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
- (u32)(0xffffffff & args->in.domains),
+ initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
+ }
+ return r;
+ }
+
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
else
size = amdgpu_gmc_get_vbios_fb_size(adev);
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
size = 0;
con->dir, &con->disable_ras_err_cnt_harvest);
}
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
* it won't be called in resume path, no need to check
* suspend and gpu reset status
*/
- if (!con)
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
amdgpu_ras_debugfs_create_ctrl_node(adev);
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
-#endif
}
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
}
con->dir = NULL;
-#endif
}
/* debugfs end */
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
- amdgpu_ras_debugfs_remove_all(adev);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ amdgpu_ras_debugfs_remove_all(adev);
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head);
-
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
- struct ras_common_if *head);
-
int amdgpu_ras_error_query(struct amdgpu_device *adev,
struct ras_query_if *info);
if (err)
goto out;
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+ err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
if (err)
goto out;
}
}
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
return r;
}
goto error;
}
- /* Update the actual used number of crtc */
- adev->mode_info.num_crtc = adev->dm.display_indexes_num;
-
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
goto fail;
}
- dm->display_indexes_num = dm->dc->caps.max_streams;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
}
};
-static struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
}
};
+static struct wm_table ddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 9.09,
+ .sr_enter_plus_exit_time_us = 10.14,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 7.32,
+ .sr_enter_plus_exit_time_us = 8.38,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.82,
+ .sr_enter_plus_exit_time_us = 11.196,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.89,
+ .sr_enter_plus_exit_time_us = 11.24,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.748,
+ .sr_enter_plus_exit_time_us = 11.102,
+ .valid = true,
+ },
+ }
+};
+
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
+ int is_green_sardine = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
+#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
- rn_bw_params.wm_table = lpddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = lpddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = lpddr4_wm_table_rn;
}
} else {
- rn_bw_params.wm_table = ddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = ddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
+ struct fixed31_32 link_bw_kbps;
if (timing->flags.DSC) {
- kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
- kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+ link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+ link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+ kbps = dc_fixpt_ceil(link_bw_kbps);
return kbps;
}
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_DEFAULT_BIT 0
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
-#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
-#define WORKLOAD_PPLIB_VIDEO_BIT 3
-#define WORKLOAD_PPLIB_VR_BIT 4
-#define WORKLOAD_PPLIB_COMPUTE_BIT 5
-#define WORKLOAD_PPLIB_CUSTOM_BIT 6
-#define WORKLOAD_PPLIB_COUNT 7
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
typedef struct {
/* MP1_EXT_SCRATCH0 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+
#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
hwmgr->thermal_controller.ucType =
powerplay_table->sThermalController.ucType;
hwmgr->thermal_controller.ucI2cLine =
ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
PHM_PlatformCaps_ThermalController);
- hwmgr->thermal_controller.use_hw_fan_control = 1;
+ if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
+ const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
+ (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
+
+ if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+ return 0;
+ } else {
+ const ATOM_PPLIB_FANTABLE *fan_table =
+ (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (1 <= fan_table->ucFanTableFormat) {
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst =
+ fan_table->ucTHyst;
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin =
+ le16_to_cpu(fan_table->usTMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed =
+ le16_to_cpu(fan_table->usTMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTHigh =
+ le16_to_cpu(fan_table->usTHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table->usPWMMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed =
+ le16_to_cpu(fan_table->usPWMMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh =
+ le16_to_cpu(fan_table->usPWMHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000;
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ }
+
+ if (2 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE2 *fan_table2 =
+ (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table2->usTMax);
+ }
+
+ if (3 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE3 *fan_table3 =
+ (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode =
+ fan_table3->ucFanControlMode;
+
+ if ((3 == fan_table->ucFanTableFormat) &&
+ (0x67B1 == adev->pdev->device))
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ 47;
+ else
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ le16_to_cpu(fan_table3->usFanPWMMax);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity =
+ 4836;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table3->usFanOutputSensitivity);
+ }
+
+ if (6 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE4 *fan_table4 =
+ (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM =
+ le16_to_cpu(fan_table4->usFanRPMMax);
+ }
+
+ if (7 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE5 *fan_table5 =
+ (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (0x67A2 == adev->pdev->device ||
+ 0x67A9 == adev->pdev->device ||
+ 0x67B9 == adev->pdev->device) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GeminiRegulatorFanControlSupport);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow =
+ le16_to_cpu(fan_table5->usFanCurrentLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh =
+ le16_to_cpu(fan_table5->usFanCurrentHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow =
+ le16_to_cpu(fan_table5->usFanRPMLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh =
+ le16_to_cpu(fan_table5->usFanRPMHigh);
+ }
+ }
+ }
+ }
return 0;
}
int pplib_workload = 0;
switch (power_profile) {
- case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
- pplib_workload = WORKLOAD_DEFAULT_BIT;
- break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
- case PP_SMC_POWER_PROFILE_POWERSAVING:
- pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
- break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
}
return pplib_workload;
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
*/
ret = intel_initial_commit(&i915->drm);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
intel_overlay_setup(i915);
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
+ min_slice_count = max_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
break;
}
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
{
struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
+ err = -ENOENT; /* override any transient errors */
}
__i915_request_queue(rq, &attr);
retire_requests(tl, prev);
mutex_unlock(&tl->mutex);
+
+ return err;
}
static const i915_user_extension_fn execbuf_extensions[] = {
err = eb_submit(&eb, batch);
err_request:
i915_request_get(eb.request);
- eb_request_add(&eb);
+ err = eb_request_add(&eb, err);
if (eb.fences)
signal_fence_array(&eb);
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ if (i915_request_on_hold(rq))
+ return false;
+
spin_lock_irq(&engine->active.lock);
if (i915_request_completed(rq)) { /* too late! */
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine)))
+ if (unlikely(timeout && preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
execlists_reset(engine, "preemption time out");
+ }
}
}
#define _L3_CACHEABILITY(value) ((value) << 4)
/* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
/*
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
mapping_set_unevictable(file->f_mapping);
return vaddr;
err_page:
- while (--i >= 0)
+ while (i--)
put_page(pages[i]);
kvfree(pages);
return NULL;
return PTR_ERR(obj);
obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
goto put1;
}
unsigned long flags;
rdma_for_each_port(device, port_num) {
- if (!rdma_ib_or_roce(device, port_num))
- continue;
-
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto out;
}
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
1);
EFA_SET(&params.modify_mask,
EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
- params.cur_qp_state = qp_attr->cur_qp_state;
- params.qp_state = qp_attr->qp_state;
+ params.cur_qp_state = cur_state;
+ params.qp_state = new_state;
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
}
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data since it is constant so no
+ * need to reflect from user
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
&qp->urq.db_rec_db2_data,
DB_REC_WIDTH_32B,
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
#define DTE_IRQ_REMAP_ENABLE 1ULL
#define PAGE_MODE_NONE 0x00
MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
MSGCODE_GET_DEVICE_TYPE,
MSGCODE_SET_DEVICE_TYPE,
- MSGCODE_GET_HDMI_VERSION,
+ MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */
MSGCODE_SET_HDMI_VERSION,
MSGCODE_GET_OSD_NAME,
MSGCODE_SET_OSD_NAME,
MSGCODE_WRITE_EEPROM,
MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
MSGCODE_SET_ACTIVE_SOURCE,
+ MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */
+ MSGCODE_SET_AUTO_POWER_ON,
MSGCODE_FRAME_EOM = 0x80,
MSGCODE_FRAME_ACK = 0x40,
"WRITE_EEPROM",
"GET_ADAPTER_TYPE",
"SET_ACTIVE_SOURCE",
+ "GET_AUTO_POWER_ON",
+ "SET_AUTO_POWER_ON",
};
static const char *pulse8_msgname(u8 cmd)
if (err)
goto unlock;
- cmd[0] = MSGCODE_SET_HDMI_VERSION;
- cmd[1] = adap->log_addrs.cec_version;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_SET_HDMI_VERSION;
+ cmd[1] = adap->log_addrs.cec_version;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+ }
if (adap->log_addrs.osd_name[0]) {
size_t osd_len = strlen(adap->log_addrs.osd_name);
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
- pulse8->serio = NULL;
serio_set_drvdata(serio, NULL);
serio_close(serio);
}
dev_dbg(pulse8->dev, "Autonomous mode: %s",
data[0] ? "on" : "off");
+ if (pulse8->vers >= 10) {
+ cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (!err)
+ dev_dbg(pulse8->dev, "Auto Power On: %s",
+ data[0] ? "on" : "off");
+ }
+
cmd[0] = MSGCODE_GET_DEVICE_TYPE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
cec_phys_addr_exp(*pa));
- cmd[0] = MSGCODE_GET_HDMI_VERSION;
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
- if (err)
- return err;
- log_addrs->cec_version = data[0];
- dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_GET_HDMI_VERSION;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ log_addrs->cec_version = data[0];
+ dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ }
cmd[0] = MSGCODE_GET_OSD_NAME;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
- if (err < 0)
- goto free_device;
+ if (err < 0) {
+ kfree(pulse8);
+ return err;
+ }
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
-free_device:
- kfree(pulse8);
return err;
}
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
+ /*
+ * We need to set these flags here so that the videobuf2 core
+ * will call ->prepare()/->finish() cache sync/flush on vb2
+ * buffers when appropriate. However, we can avoid explicit
+ * ->prepare() and ->finish() cache sync for DMABUF buffers,
+ * because DMA exporter takes care of it.
+ */
+ if (q->memory != VB2_MEMORY_DMABUF) {
+ vb->need_cache_sync_on_prepare = 1;
+ vb->need_cache_sync_on_finish = 1;
+ }
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
{
u32 val;
- /* Period of raw software sampling in ns */
- val = DIV_ROUND_CLOSEST(1000000000ul,
- clk_get_rate(ir->bus) / ir->data->div);
-
/*
* Period for software decoder used in the
* unit of raw software sampling
*/
- val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val);
+ val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
+ USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
dev_dbg(ir->dev, "@pwm clk = \t%lu\n",
clk_get_rate(ir->bus) / ir->data->div);
mtk_irq_enable(ir, MTK_IRINT_EN);
dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
- DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
+ MTK_IR_SAMPLE);
return 0;
{
u32 i;
- vidtv_psi_pat_table_destroy(m->si.pat);
-
for (i = 0; i < m->si.pat->num_pmt; ++i)
vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
+ vidtv_psi_pat_table_destroy(m->si.pat);
+
kfree(m->si.pmt_secs);
vidtv_psi_sdt_table_destroy(m->si.sdt);
vidtv_psi_nit_table_destroy(m->si.nit);
struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
+ * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
* @pmt: The PMT section that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
+ * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
* @sdt: The SDT that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each
+ * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each
* program found in the PAT
* @pat: The PAT to look for programs.
* @pcr_pid: packet ID for the PCR to be used for the program described in this
struct vidtv_psi_table_eit
*vidtv_psi_eit_table_init(u16 network_id,
u16 transport_stream_id,
- u16 service_id);
+ __be16 service_id);
/**
* struct vidtv_psi_eit_write_args - Arguments for writing an EIT section
e->is_video_encoder = false;
ctx = kzalloc(priv_sz, GFP_KERNEL);
- if (!ctx)
+ if (!ctx) {
+ kfree(e);
return NULL;
+ }
e->ctx = ctx;
ctx->last_duration = 0;
u8 adaptation_field:1;
u8 scrambling:2;
} __packed;
- struct vidtv_mpeg_ts_adaption adaption[];
+ struct vidtv_mpeg_ts_adaption *adaption;
} __packed;
/**
return &bond_opts[option];
}
+static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+{
+ if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+ return;
+
+ if (mode == BOND_MODE_ACTIVEBACKUP)
+ bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+ netdev_update_features(bond_dev);
+}
+
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
-#ifdef CONFIG_XFRM_OFFLOAD
- if (newval->value == BOND_MODE_ACTIVEBACKUP)
- bond->dev->wanted_features |= BOND_XFRM_FEATURES;
- else
- bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
- netdev_change_features(bond->dev);
-#endif /* CONFIG_XFRM_OFFLOAD */
+ if (bond->dev->reg_state == NETREG_REGISTERED)
+ bond_set_xfrm_features(bond->dev, newval->value);
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
/* check or determine and set bittime */
ret = open_candev(ndev);
- if (!ret)
- ret = softing_startstop(ndev, 1);
+ if (ret)
+ return ret;
+
+ ret = softing_startstop(ndev, 1);
+ if (ret < 0)
+ close_candev(ndev);
+
return ret;
}
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
- int tc;
err = felix_init_structs(felix, ds->num_ports);
if (err)
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_UC);
- /* Setup the per-traffic class flooding PGIDs */
- for (tc = 0; tc < FELIX_NUM_TC; tc++)
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, tc);
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = FELIX_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = 1;
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tristate "Agere ET-1310 Gigabit Ethernet support"
depends on PCI
select PHYLIB
+ select CRC32
help
This driver supports Agere ET-1310 ethernet adapters.
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
select PHYLINK
+ select CRC32
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
-#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
-#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
cpl->len = htons(pktlen);
memcpy(buf, skb->data, pktlen);
- if (tx_info->ip_family == AF_INET) {
+ if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
-#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
-#endif
}
cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
+ select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
+ select CRC32
select PHYLIB
imply PTP_1588_CLOCK
help
if (!of_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ of_node_put(node);
return -ENODEV;
}
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
- { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T64, "MAC tx 64 byte packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
+ { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1518 0x8198
-#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_R1522 0x8198
+#define ENETC_PM0_R1523X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T64 0x8270
#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T255 0x8280
+#define ENETC_PM0_T511 0x8288
#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_T1522 0x8298
+#define ENETC_PM0_T1523X 0x82A0
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
+ select CRC32
default n
help
Freescale Data-Path Acceleration Architecture Frame Manager
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
-#pragma pack(1)
-
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
struct hclge_dbg_reg_common_msg reg_msg;
};
-#pragma pack()
-
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
/* Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
+ mac_data &= ~BIT(28);
ew32(FEXTNVM9, mac_data);
/* Enable K1 off to enable mPHY Power Gating */
mac_data = er32(FEXTNVM6);
mac_data |= BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Enable mPHY power gating for any link and speed */
mac_data = er32(FEXTNVM8);
/* Disable K1 off */
mac_data = er32(FEXTNVM6);
mac_data &= ~BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Disable Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
+ mac_data |= BIT(28);
ew32(FEXTNVM9, mac_data);
/* Cancel not waking from dynamic
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buf->page);
+#else
+ 0;
+#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
+ int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- ice_put_rx_buf(rx_ring, NULL);
+ ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
total_rx_pkts++;
cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
break;
}
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
}
}
-static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
- int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
- struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- if (frame_size > igb_rx_bufsz(ring))
+ if (frame_size > igb_rx_bufsz(ring)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "The RX buffer size is too small for the frame size");
+ netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
+ igb_rx_bufsz(ring), frame_size);
return -EINVAL;
+ }
}
old_prog = xchg(&adapter->xdp_prog, prog);
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return igb_xdp_setup(dev, xdp->prog);
+ return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
- return -ENXIO;
+ return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
+ adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
- netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ netdev_warn(adapter->netdev,
+ "Requested MTU size is not supported with XDP. Max frame size is %d\n",
+ max_frame);
return -EINVAL;
}
}
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
goto err_port_init;
}
- if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
goto err_port_init;
+ }
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
- en_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+ en_dbg(DRV, priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+ }
}
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
+ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
local_bh_enable();
}
+ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- watchdog_task);
+ restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- /* NIC is probably restarting - let watchdog task reset
+ /* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+ &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
return cnt;
}
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
} buf[MLX4_EN_CACHE_SIZE];
};
+enum {
+ MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
+ unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
struct mutex mutex; /* for mutual access to stats bitmap */
};
+enum {
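+	/* A port restart has been scheduled or is in progress */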
+ MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct watchdog_task;
+ struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
+ unsigned long state;
};
enum mlx4_en_wol {
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
+ select CRC32
default y
help
Build support for software-managed steering in the NIC.
depends on PCI
select PHYLIB
select CRC16
+ select CRC32
help
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, 0);
+ for (i = 0; i < ocelot->num_flooding_pgids; i++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
}
ocelot->num_phys_ports = of_get_child_count(ports);
+ ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
+ select CRC32
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
struct nfp_net_dp *dp;
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
- return -EBUSY;
-
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
xdp_attachment_setup(&nn->xdp, bpf);
{
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
- return -EBUSY;
-
err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
tristate "NXP ethernet MAC on LPC devices"
depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
+ select CRC32
help
Say Y or M here if you want to use the NXP ethernet MAC included on
some NXP LPC devices. You can safely enable this option for LPC32xx
config ROCKER
tristate "Rocker switch driver (EXPERIMENTAL)"
depends on PCI && NET_SWITCHDEV && BRIDGE
+ select CRC32
help
This driver supports the Rocker switch device.
goto err_parse_dt;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(dwmac->ops->addr_width));
- if (ret) {
- dev_err(&pdev->dev, "DMA mask set failed\n");
- goto err_dma_mask;
- }
-
+ plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dma_mask:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
return -ENOMEM;
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
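+	/* clk_mux applies the mask after shifting, so store it relative to bit 0 */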
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
ARRAY_SIZE(mux_parents), &clk_mux_ops,
&clk_configs->m250_mux.hw);
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 1000000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
}
/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
*/
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
dev_info(priv->device, "SPH feature enabled\n");
}
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
+ * 32/40/64 bit widths, but some SoCs support other widths: the
+ * i.MX8MP, for example, supports 34 bits, which maps to the 40-bit
+ * width in MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64
+ * according to the real HW design.
+ */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
return ret;
}
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ rtnl_lock();
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+ rtnl_unlock();
+ }
+
rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
+ stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
mutex_unlock(&priv->lock);
rtnl_unlock();
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
- }
-
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);
if (!priv->xdpi.prog && !prog)
return 0;
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
WRITE_ONCE(priv->xdp_prog, prog);
xdp_attachment_setup(&priv->xdpi, bpf);
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
- struct resource *res;
const void *addr;
__be32 *p;
bool little_endian;
of_node_put(dma_np);
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!lp->sdma_regs) {
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(lp->sdma_regs);
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
- goto rx_error;
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- if (pskb_may_pull(skb, sizeof(struct iphdr)))
- goto rx_error;
- break;
- case htons(ETH_P_IPV6):
- if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
- goto rx_error;
- break;
- default:
- goto rx_error;
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
+ goto drop;
}
+
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
dev_sw_netstats_rx_add(geneve->dev, len);
return;
-rx_error:
- geneve->dev->stats.rx_errors++;
drop:
/* Consume bad packet */
kfree_skb(skb);
/* The allocator will give us a power-of-2 number of pages. But we
* can't guarantee that, so request it. That way we won't waste any
* memory that would be available beyond the required space.
+ *
+ * Note that gsi_trans_pool_exit_dma() assumes the total allocated
+ * size is exactly (count * size).
*/
total_size = get_order(total_size) << PAGE_SHIFT;
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
- dma_free_coherent(dev, pool->size, pool->base, pool->addr);
+ size_t total_size = pool->count * pool->size;
+
+ dma_free_coherent(dev, total_size, pool->base, pool->addr);
memset(pool, 0, sizeof(*pool));
}
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
struct nsim_bpf_bound_prog *state;
+ int ret = 0;
state = env->prog->aux->offload->dev_priv;
if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
msleep(state->nsim_dev->bpf_bind_verifier_delay);
- if (insn_idx == env->prog->len - 1)
+ if (insn_idx == env->prog->len - 1) {
pr_vlog(env, "Hello from netdevsim!\n");
- return 0;
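+		/* Let DebugFS force a verification failure at the last insn */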
+ if (!state->nsim_dev->bpf_bind_verifier_accept)
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
}
static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
int err;
- if (!xdp_attachment_flags_ok(xdp, bpf))
- return -EBUSY;
-
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
return -EOPNOTSUPP;
&nsim_dev->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_delay);
+ nsim_dev->bpf_bind_verifier_accept = true;
+ debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_accept);
return 0;
}
struct dentry *take_snapshot;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
+ bool bpf_bind_verifier_accept;
u32 bpf_bind_verifier_delay;
struct dentry *ddir_bpf_bound_progs;
u32 prog_id_gen;
int orig_iif = skb->skb_iif;
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
+ bool is_ll_src;
/* loopback, multicast & non-ND link-local traffic; do not push through
- * packet taps again. Reset pkt_type for upper layers to process skb
+ * packet taps again. Reset pkt_type for upper layers to process skb.
+ * For packets with a link-local source address, however, skip this so
+ * that the dst can be determined at input using the original ifindex
+ * in the case that daddr needs strict handling.
*/
- if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+ is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
+ if (skb->pkt_type == PACKET_LOOPBACK ||
+ (need_strict && !is_ndisc && !is_ll_src)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
/*
- * The signal type is GPIO if the signal name has "GPI" as a prefix.
- * strncmp (rather than strcmp) is used to implement the prefix
- * requirement.
+ * We need to differentiate between GPIO and non-GPIO signals to
+ * implement the gpio_request_enable() interface. For better or worse
+ * the ASPEED pinctrl driver uses the expression names to determine
+ * whether an expression will mux a pin for GPIO.
*
- * expr->signal might look like "GPIOB1" in the GPIO case.
- * expr->signal might look like "GPIT0" in the GPI case.
+ * Generally we have the following - A GPIO such as B1 has:
+ *
+ * - expr->signal set to "GPIOB1"
+ * - expr->function set to "GPIOB1"
+ *
+ * Using this fact we can determine whether the provided expression is
+ * a GPIO expression by testing the signal name for the string prefix
+ * "GPIO".
+ *
+ * However, some GPIOs are input-only, and the ASPEED datasheets name
+ * them differently. An input-only GPIO such as T0 has:
+ *
+ * - expr->signal set to "GPIT0"
+ * - expr->function set to "GPIT0"
+ *
+ * It's tempting to generalise the prefix test from "GPIO" to "GPI" to
+ * account for both GPIOs and GPIs, but in doing so we run aground on
+ * another feature:
+ *
+ * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO
+ * function where the input state of one pin is replicated as the
+ * output state of another (as if they were shorted together - a mux
+ * configuration that is typically enabled by hardware strapping).
+ * This feature allows the BMC to pass e.g. power button state through
+ * to the host while the BMC is yet to boot, but take control of the
+ * button state once the BMC has booted by muxing each pin as a
+ * separate, pin-specific GPIO.
+ *
+ * Conceptually this pass-through mode is a form of GPIO and is named
+ * as such in the datasheets, e.g. "GPID0". This naming similarity
+ * trips us up with the simple GPI-prefixed-signal-name scheme
+ * discussed above, as the pass-through configuration is not what we
+ * want when muxing a pin as GPIO for the GPIO subsystem.
+ *
+ * On e.g. the AST2400, a pass-through function "GPID0" is grouped on
+ * balls A18 and D16, where we have:
+ *
+ * For ball A18:
+ * - expr->signal set to "GPID0IN"
+ * - expr->function set to "GPID0"
+ *
+ * For ball D16:
+ * - expr->signal set to "GPID0OUT"
+ * - expr->function set to "GPID0"
+ *
+ * By contrast, the pin-specific GPIO expressions for the same pins are
+ * as follows:
+ *
+ * For ball A18:
+ * - expr->signal looks like "GPIOD0"
+ * - expr->function looks like "GPIOD0"
+ *
+ * For ball D16:
+ * - expr->signal looks like "GPIOD1"
+ * - expr->function looks like "GPIOD1"
+ *
+ * Testing both the signal _and_ function names gives us the means to
+ * differentiate the pass-through GPIO pinmux configuration from the
+ * pin-specific configuration that the GPIO subsystem is after: An
+ * expression is a pin-specific (non-pass-through) GPIO configuration
+ * if the signal prefix is "GPI" and the signal name matches the
+ * function name.
*/
- return strncmp(expr->signal, "GPI", 3) == 0;
+ return !strncmp(expr->signal, "GPI", 3) &&
+ !strcmp(expr->signal, expr->function);
}
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
* evaluation of the descriptors.
*
* @signal: The signal name for the priority level on the pin. If the signal
- * type is GPIO, then the signal name must begin with the string
- * "GPIO", e.g. GPIOA0, GPIOT4 etc.
+ * type is GPIO, then the signal name must begin with the
+ * prefix "GPI", e.g. GPIOA0, GPIT0 etc.
* @function: The name of the function the signal participates in for the
- * associated expression
+ * associated expression. For pin-specific GPIO, the function
+ * name must match the signal name.
* @ndescs: The number of signal descriptors in the expression
* @descs: Pointer to an array of signal descriptors that comprise the
* function expression
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
debounce = readl(db_reg);
- debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
if (arg)
conf |= BYT_DEBOUNCE_EN;
switch (arg) {
case 375:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
value |= PADCFG0_PMODE_GPIO;
/* Disable input and output buffers */
- value &= ~PADCFG0_GPIORXDIS;
- value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
#define JSL_PAD_OWN 0x020
#define JSL_PADCFGLOCK 0x080
-#define JSL_HOSTSW_OWN 0x0b0
+#define JSL_HOSTSW_OWN 0x0c0
#define JSL_GPI_IS 0x100
#define JSL_GPI_IE 0x120
PINCTRL_PIN(17, "EMMC_CLK"),
PINCTRL_PIN(18, "EMMC_RESETB"),
PINCTRL_PIN(19, "A4WP_PRESENT"),
+ /* SPI */
+ PINCTRL_PIN(20, "SPI0_IO_2"),
+ PINCTRL_PIN(21, "SPI0_IO_3"),
+ PINCTRL_PIN(22, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(23, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(24, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(25, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(26, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(27, "SPI0_CLK"),
+ PINCTRL_PIN(28, "SPI0_CLK_LOOPBK"),
/* GPP_B */
- PINCTRL_PIN(20, "CORE_VID_0"),
- PINCTRL_PIN(21, "CORE_VID_1"),
- PINCTRL_PIN(22, "VRALERTB"),
- PINCTRL_PIN(23, "CPU_GP_2"),
- PINCTRL_PIN(24, "CPU_GP_3"),
- PINCTRL_PIN(25, "SRCCLKREQB_0"),
- PINCTRL_PIN(26, "SRCCLKREQB_1"),
- PINCTRL_PIN(27, "SRCCLKREQB_2"),
- PINCTRL_PIN(28, "SRCCLKREQB_3"),
- PINCTRL_PIN(29, "SRCCLKREQB_4"),
- PINCTRL_PIN(30, "SRCCLKREQB_5"),
- PINCTRL_PIN(31, "PMCALERTB"),
- PINCTRL_PIN(32, "SLP_S0B"),
- PINCTRL_PIN(33, "PLTRSTB"),
- PINCTRL_PIN(34, "SPKR"),
- PINCTRL_PIN(35, "GSPI0_CS0B"),
- PINCTRL_PIN(36, "GSPI0_CLK"),
- PINCTRL_PIN(37, "GSPI0_MISO"),
- PINCTRL_PIN(38, "GSPI0_MOSI"),
- PINCTRL_PIN(39, "GSPI1_CS0B"),
- PINCTRL_PIN(40, "GSPI1_CLK"),
- PINCTRL_PIN(41, "GSPI1_MISO"),
- PINCTRL_PIN(42, "GSPI1_MOSI"),
- PINCTRL_PIN(43, "DDSP_HPD_A"),
- PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"),
- PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(29, "CORE_VID_0"),
+ PINCTRL_PIN(30, "CORE_VID_1"),
+ PINCTRL_PIN(31, "VRALERTB"),
+ PINCTRL_PIN(32, "CPU_GP_2"),
+ PINCTRL_PIN(33, "CPU_GP_3"),
+ PINCTRL_PIN(34, "SRCCLKREQB_0"),
+ PINCTRL_PIN(35, "SRCCLKREQB_1"),
+ PINCTRL_PIN(36, "SRCCLKREQB_2"),
+ PINCTRL_PIN(37, "SRCCLKREQB_3"),
+ PINCTRL_PIN(38, "SRCCLKREQB_4"),
+ PINCTRL_PIN(39, "SRCCLKREQB_5"),
+ PINCTRL_PIN(40, "PMCALERTB"),
+ PINCTRL_PIN(41, "SLP_S0B"),
+ PINCTRL_PIN(42, "PLTRSTB"),
+ PINCTRL_PIN(43, "SPKR"),
+ PINCTRL_PIN(44, "GSPI0_CS0B"),
+ PINCTRL_PIN(45, "GSPI0_CLK"),
+ PINCTRL_PIN(46, "GSPI0_MISO"),
+ PINCTRL_PIN(47, "GSPI0_MOSI"),
+ PINCTRL_PIN(48, "GSPI1_CS0B"),
+ PINCTRL_PIN(49, "GSPI1_CLK"),
+ PINCTRL_PIN(50, "GSPI1_MISO"),
+ PINCTRL_PIN(51, "GSPI1_MOSI"),
+ PINCTRL_PIN(52, "DDSP_HPD_A"),
+ PINCTRL_PIN(53, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(54, "GSPI1_CLK_LOOPBK"),
/* GPP_A */
- PINCTRL_PIN(46, "ESPI_IO_0"),
- PINCTRL_PIN(47, "ESPI_IO_1"),
- PINCTRL_PIN(48, "ESPI_IO_2"),
- PINCTRL_PIN(49, "ESPI_IO_3"),
- PINCTRL_PIN(50, "ESPI_CSB"),
- PINCTRL_PIN(51, "ESPI_CLK"),
- PINCTRL_PIN(52, "ESPI_RESETB"),
- PINCTRL_PIN(53, "SMBCLK"),
- PINCTRL_PIN(54, "SMBDATA"),
- PINCTRL_PIN(55, "SMBALERTB"),
- PINCTRL_PIN(56, "CPU_GP_0"),
- PINCTRL_PIN(57, "CPU_GP_1"),
- PINCTRL_PIN(58, "USB2_OCB_1"),
- PINCTRL_PIN(59, "USB2_OCB_2"),
- PINCTRL_PIN(60, "USB2_OCB_3"),
- PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"),
- PINCTRL_PIN(62, "DDSP_HPD_B"),
- PINCTRL_PIN(63, "DDSP_HPD_C"),
- PINCTRL_PIN(64, "USB2_OCB_0"),
- PINCTRL_PIN(65, "PCHHOTB"),
- PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+ PINCTRL_PIN(55, "ESPI_IO_0"),
+ PINCTRL_PIN(56, "ESPI_IO_1"),
+ PINCTRL_PIN(57, "ESPI_IO_2"),
+ PINCTRL_PIN(58, "ESPI_IO_3"),
+ PINCTRL_PIN(59, "ESPI_CSB"),
+ PINCTRL_PIN(60, "ESPI_CLK"),
+ PINCTRL_PIN(61, "ESPI_RESETB"),
+ PINCTRL_PIN(62, "SMBCLK"),
+ PINCTRL_PIN(63, "SMBDATA"),
+ PINCTRL_PIN(64, "SMBALERTB"),
+ PINCTRL_PIN(65, "CPU_GP_0"),
+ PINCTRL_PIN(66, "CPU_GP_1"),
+ PINCTRL_PIN(67, "USB2_OCB_1"),
+ PINCTRL_PIN(68, "USB2_OCB_2"),
+ PINCTRL_PIN(69, "USB2_OCB_3"),
+ PINCTRL_PIN(70, "DDSP_HPD_A_TIME_SYNC_0"),
+ PINCTRL_PIN(71, "DDSP_HPD_B"),
+ PINCTRL_PIN(72, "DDSP_HPD_C"),
+ PINCTRL_PIN(73, "USB2_OCB_0"),
+ PINCTRL_PIN(74, "PCHHOTB"),
+ PINCTRL_PIN(75, "ESPI_CLK_LOOPBK"),
/* GPP_S */
- PINCTRL_PIN(67, "SNDW1_CLK"),
- PINCTRL_PIN(68, "SNDW1_DATA"),
- PINCTRL_PIN(69, "SNDW2_CLK"),
- PINCTRL_PIN(70, "SNDW2_DATA"),
- PINCTRL_PIN(71, "SNDW1_CLK"),
- PINCTRL_PIN(72, "SNDW1_DATA"),
- PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"),
- PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"),
+ PINCTRL_PIN(76, "SNDW1_CLK"),
+ PINCTRL_PIN(77, "SNDW1_DATA"),
+ PINCTRL_PIN(78, "SNDW2_CLK"),
+ PINCTRL_PIN(79, "SNDW2_DATA"),
+ PINCTRL_PIN(80, "SNDW1_CLK"),
+ PINCTRL_PIN(81, "SNDW1_DATA"),
+ PINCTRL_PIN(82, "SNDW4_CLK_DMIC_CLK_0"),
+ PINCTRL_PIN(83, "SNDW4_DATA_DMIC_DATA_0"),
/* GPP_R */
- PINCTRL_PIN(75, "HDA_BCLK"),
- PINCTRL_PIN(76, "HDA_SYNC"),
- PINCTRL_PIN(77, "HDA_SDO"),
- PINCTRL_PIN(78, "HDA_SDI_0"),
- PINCTRL_PIN(79, "HDA_RSTB"),
- PINCTRL_PIN(80, "HDA_SDI_1"),
- PINCTRL_PIN(81, "I2S1_SFRM"),
- PINCTRL_PIN(82, "I2S1_TXD"),
+ PINCTRL_PIN(84, "HDA_BCLK"),
+ PINCTRL_PIN(85, "HDA_SYNC"),
+ PINCTRL_PIN(86, "HDA_SDO"),
+ PINCTRL_PIN(87, "HDA_SDI_0"),
+ PINCTRL_PIN(88, "HDA_RSTB"),
+ PINCTRL_PIN(89, "HDA_SDI_1"),
+ PINCTRL_PIN(90, "I2S1_SFRM"),
+ PINCTRL_PIN(91, "I2S1_TXD"),
/* GPP_H */
- PINCTRL_PIN(83, "GPPC_H_0"),
- PINCTRL_PIN(84, "SD_PWR_EN_B"),
- PINCTRL_PIN(85, "MODEM_CLKREQ"),
- PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"),
- PINCTRL_PIN(87, "I2C2_SDA"),
- PINCTRL_PIN(88, "I2C2_SCL"),
- PINCTRL_PIN(89, "I2C3_SDA"),
- PINCTRL_PIN(90, "I2C3_SCL"),
- PINCTRL_PIN(91, "I2C4_SDA"),
- PINCTRL_PIN(92, "I2C4_SCL"),
- PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"),
- PINCTRL_PIN(94, "I2S2_SCLK"),
- PINCTRL_PIN(95, "I2S2_SFRM"),
- PINCTRL_PIN(96, "I2S2_TXD"),
- PINCTRL_PIN(97, "I2S2_RXD"),
- PINCTRL_PIN(98, "I2S1_SCLK"),
- PINCTRL_PIN(99, "GPPC_H_16"),
- PINCTRL_PIN(100, "GPPC_H_17"),
- PINCTRL_PIN(101, "GPPC_H_18"),
- PINCTRL_PIN(102, "GPPC_H_19"),
- PINCTRL_PIN(103, "GPPC_H_20"),
- PINCTRL_PIN(104, "GPPC_H_21"),
- PINCTRL_PIN(105, "GPPC_H_22"),
- PINCTRL_PIN(106, "GPPC_H_23"),
+ PINCTRL_PIN(92, "GPPC_H_0"),
+ PINCTRL_PIN(93, "SD_PWR_EN_B"),
+ PINCTRL_PIN(94, "MODEM_CLKREQ"),
+ PINCTRL_PIN(95, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(96, "I2C2_SDA"),
+ PINCTRL_PIN(97, "I2C2_SCL"),
+ PINCTRL_PIN(98, "I2C3_SDA"),
+ PINCTRL_PIN(99, "I2C3_SCL"),
+ PINCTRL_PIN(100, "I2C4_SDA"),
+ PINCTRL_PIN(101, "I2C4_SCL"),
+ PINCTRL_PIN(102, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(103, "I2S2_SCLK"),
+ PINCTRL_PIN(104, "I2S2_SFRM"),
+ PINCTRL_PIN(105, "I2S2_TXD"),
+ PINCTRL_PIN(106, "I2S2_RXD"),
+ PINCTRL_PIN(107, "I2S1_SCLK"),
+ PINCTRL_PIN(108, "GPPC_H_16"),
+ PINCTRL_PIN(109, "GPPC_H_17"),
+ PINCTRL_PIN(110, "GPPC_H_18"),
+ PINCTRL_PIN(111, "GPPC_H_19"),
+ PINCTRL_PIN(112, "GPPC_H_20"),
+ PINCTRL_PIN(113, "GPPC_H_21"),
+ PINCTRL_PIN(114, "GPPC_H_22"),
+ PINCTRL_PIN(115, "GPPC_H_23"),
/* GPP_D */
- PINCTRL_PIN(107, "SPI1_CSB"),
- PINCTRL_PIN(108, "SPI1_CLK"),
- PINCTRL_PIN(109, "SPI1_MISO_IO_1"),
- PINCTRL_PIN(110, "SPI1_MOSI_IO_0"),
- PINCTRL_PIN(111, "ISH_I2C0_SDA"),
- PINCTRL_PIN(112, "ISH_I2C0_SCL"),
- PINCTRL_PIN(113, "ISH_I2C1_SDA"),
- PINCTRL_PIN(114, "ISH_I2C1_SCL"),
- PINCTRL_PIN(115, "ISH_SPI_CSB"),
- PINCTRL_PIN(116, "ISH_SPI_CLK"),
- PINCTRL_PIN(117, "ISH_SPI_MISO"),
- PINCTRL_PIN(118, "ISH_SPI_MOSI"),
- PINCTRL_PIN(119, "ISH_UART0_RXD"),
- PINCTRL_PIN(120, "ISH_UART0_TXD"),
- PINCTRL_PIN(121, "ISH_UART0_RTSB"),
- PINCTRL_PIN(122, "ISH_UART0_CTSB"),
- PINCTRL_PIN(123, "SPI1_IO_2"),
- PINCTRL_PIN(124, "SPI1_IO_3"),
- PINCTRL_PIN(125, "I2S_MCLK"),
- PINCTRL_PIN(126, "CNV_MFUART2_RXD"),
- PINCTRL_PIN(127, "CNV_MFUART2_TXD"),
- PINCTRL_PIN(128, "CNV_PA_BLANKING"),
- PINCTRL_PIN(129, "I2C5_SDA"),
- PINCTRL_PIN(130, "I2C5_SCL"),
- PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"),
- PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(116, "SPI1_CSB"),
+ PINCTRL_PIN(117, "SPI1_CLK"),
+ PINCTRL_PIN(118, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(119, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(120, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(121, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(122, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(123, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(124, "ISH_SPI_CSB"),
+ PINCTRL_PIN(125, "ISH_SPI_CLK"),
+ PINCTRL_PIN(126, "ISH_SPI_MISO"),
+ PINCTRL_PIN(127, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(128, "ISH_UART0_RXD"),
+ PINCTRL_PIN(129, "ISH_UART0_TXD"),
+ PINCTRL_PIN(130, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(131, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(132, "SPI1_IO_2"),
+ PINCTRL_PIN(133, "SPI1_IO_3"),
+ PINCTRL_PIN(134, "I2S_MCLK"),
+ PINCTRL_PIN(135, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(136, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(137, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(138, "I2C5_SDA"),
+ PINCTRL_PIN(139, "I2C5_SCL"),
+ PINCTRL_PIN(140, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(141, "SPI1_CLK_LOOPBK"),
/* vGPIO */
- PINCTRL_PIN(133, "CNV_BTEN"),
- PINCTRL_PIN(134, "CNV_WCEN"),
- PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"),
- PINCTRL_PIN(136, "CNV_BT_IF_SELECT"),
- PINCTRL_PIN(137, "vCNV_BT_UART_TXD"),
- PINCTRL_PIN(138, "vCNV_BT_UART_RXD"),
- PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"),
- PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"),
- PINCTRL_PIN(141, "vCNV_MFUART1_TXD"),
- PINCTRL_PIN(142, "vCNV_MFUART1_RXD"),
- PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"),
- PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"),
- PINCTRL_PIN(145, "vUART0_TXD"),
- PINCTRL_PIN(146, "vUART0_RXD"),
- PINCTRL_PIN(147, "vUART0_CTS_B"),
- PINCTRL_PIN(148, "vUART0_RTS_B"),
- PINCTRL_PIN(149, "vISH_UART0_TXD"),
- PINCTRL_PIN(150, "vISH_UART0_RXD"),
- PINCTRL_PIN(151, "vISH_UART0_CTS_B"),
- PINCTRL_PIN(152, "vISH_UART0_RTS_B"),
- PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"),
- PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"),
- PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"),
- PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"),
- PINCTRL_PIN(157, "vI2S2_SCLK"),
- PINCTRL_PIN(158, "vI2S2_SFRM"),
- PINCTRL_PIN(159, "vI2S2_TXD"),
- PINCTRL_PIN(160, "vI2S2_RXD"),
- PINCTRL_PIN(161, "vSD3_CD_B"),
+ PINCTRL_PIN(142, "CNV_BTEN"),
+ PINCTRL_PIN(143, "CNV_WCEN"),
+ PINCTRL_PIN(144, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(145, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(146, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(150, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(154, "vUART0_TXD"),
+ PINCTRL_PIN(155, "vUART0_RXD"),
+ PINCTRL_PIN(156, "vUART0_CTS_B"),
+ PINCTRL_PIN(157, "vUART0_RTS_B"),
+ PINCTRL_PIN(158, "vISH_UART0_TXD"),
+ PINCTRL_PIN(159, "vISH_UART0_RXD"),
+ PINCTRL_PIN(160, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(161, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(162, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(163, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(164, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(165, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(166, "vI2S2_SCLK"),
+ PINCTRL_PIN(167, "vI2S2_SFRM"),
+ PINCTRL_PIN(168, "vI2S2_TXD"),
+ PINCTRL_PIN(169, "vI2S2_RXD"),
+ PINCTRL_PIN(170, "vSD3_CD_B"),
/* GPP_C */
- PINCTRL_PIN(162, "GPPC_C_0"),
- PINCTRL_PIN(163, "GPPC_C_1"),
- PINCTRL_PIN(164, "GPPC_C_2"),
- PINCTRL_PIN(165, "GPPC_C_3"),
- PINCTRL_PIN(166, "GPPC_C_4"),
- PINCTRL_PIN(167, "GPPC_C_5"),
- PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"),
- PINCTRL_PIN(169, "SUSACKB"),
- PINCTRL_PIN(170, "UART0_RXD"),
- PINCTRL_PIN(171, "UART0_TXD"),
- PINCTRL_PIN(172, "UART0_RTSB"),
- PINCTRL_PIN(173, "UART0_CTSB"),
- PINCTRL_PIN(174, "UART1_RXD"),
- PINCTRL_PIN(175, "UART1_TXD"),
- PINCTRL_PIN(176, "UART1_RTSB"),
- PINCTRL_PIN(177, "UART1_CTSB"),
- PINCTRL_PIN(178, "I2C0_SDA"),
- PINCTRL_PIN(179, "I2C0_SCL"),
- PINCTRL_PIN(180, "I2C1_SDA"),
- PINCTRL_PIN(181, "I2C1_SCL"),
- PINCTRL_PIN(182, "UART2_RXD"),
- PINCTRL_PIN(183, "UART2_TXD"),
- PINCTRL_PIN(184, "UART2_RTSB"),
- PINCTRL_PIN(185, "UART2_CTSB"),
+ PINCTRL_PIN(171, "GPPC_C_0"),
+ PINCTRL_PIN(172, "GPPC_C_1"),
+ PINCTRL_PIN(173, "GPPC_C_2"),
+ PINCTRL_PIN(174, "GPPC_C_3"),
+ PINCTRL_PIN(175, "GPPC_C_4"),
+ PINCTRL_PIN(176, "GPPC_C_5"),
+ PINCTRL_PIN(177, "SUSWARNB_SUSPWRDNACK"),
+ PINCTRL_PIN(178, "SUSACKB"),
+ PINCTRL_PIN(179, "UART0_RXD"),
+ PINCTRL_PIN(180, "UART0_TXD"),
+ PINCTRL_PIN(181, "UART0_RTSB"),
+ PINCTRL_PIN(182, "UART0_CTSB"),
+ PINCTRL_PIN(183, "UART1_RXD"),
+ PINCTRL_PIN(184, "UART1_TXD"),
+ PINCTRL_PIN(185, "UART1_RTSB"),
+ PINCTRL_PIN(186, "UART1_CTSB"),
+ PINCTRL_PIN(187, "I2C0_SDA"),
+ PINCTRL_PIN(188, "I2C0_SCL"),
+ PINCTRL_PIN(189, "I2C1_SDA"),
+ PINCTRL_PIN(190, "I2C1_SCL"),
+ PINCTRL_PIN(191, "UART2_RXD"),
+ PINCTRL_PIN(192, "UART2_TXD"),
+ PINCTRL_PIN(193, "UART2_RTSB"),
+ PINCTRL_PIN(194, "UART2_CTSB"),
/* HVCMOS */
- PINCTRL_PIN(186, "L_BKLTEN"),
- PINCTRL_PIN(187, "L_BKLTCTL"),
- PINCTRL_PIN(188, "L_VDDEN"),
- PINCTRL_PIN(189, "SYS_PWROK"),
- PINCTRL_PIN(190, "SYS_RESETB"),
- PINCTRL_PIN(191, "MLK_RSTB"),
+ PINCTRL_PIN(195, "L_BKLTEN"),
+ PINCTRL_PIN(196, "L_BKLTCTL"),
+ PINCTRL_PIN(197, "L_VDDEN"),
+ PINCTRL_PIN(198, "SYS_PWROK"),
+ PINCTRL_PIN(199, "SYS_RESETB"),
+ PINCTRL_PIN(200, "MLK_RSTB"),
/* GPP_E */
- PINCTRL_PIN(192, "ISH_GP_0"),
- PINCTRL_PIN(193, "ISH_GP_1"),
- PINCTRL_PIN(194, "IMGCLKOUT_1"),
- PINCTRL_PIN(195, "ISH_GP_2"),
- PINCTRL_PIN(196, "IMGCLKOUT_2"),
- PINCTRL_PIN(197, "SATA_LEDB"),
- PINCTRL_PIN(198, "IMGCLKOUT_3"),
- PINCTRL_PIN(199, "ISH_GP_3"),
- PINCTRL_PIN(200, "ISH_GP_4"),
- PINCTRL_PIN(201, "ISH_GP_5"),
- PINCTRL_PIN(202, "ISH_GP_6"),
- PINCTRL_PIN(203, "ISH_GP_7"),
- PINCTRL_PIN(204, "IMGCLKOUT_4"),
- PINCTRL_PIN(205, "DDPA_CTRLCLK"),
- PINCTRL_PIN(206, "DDPA_CTRLDATA"),
- PINCTRL_PIN(207, "DDPB_CTRLCLK"),
- PINCTRL_PIN(208, "DDPB_CTRLDATA"),
- PINCTRL_PIN(209, "DDPC_CTRLCLK"),
- PINCTRL_PIN(210, "DDPC_CTRLDATA"),
- PINCTRL_PIN(211, "IMGCLKOUT_5"),
- PINCTRL_PIN(212, "CNV_BRI_DT"),
- PINCTRL_PIN(213, "CNV_BRI_RSP"),
- PINCTRL_PIN(214, "CNV_RGI_DT"),
- PINCTRL_PIN(215, "CNV_RGI_RSP"),
+ PINCTRL_PIN(201, "ISH_GP_0"),
+ PINCTRL_PIN(202, "ISH_GP_1"),
+ PINCTRL_PIN(203, "IMGCLKOUT_1"),
+ PINCTRL_PIN(204, "ISH_GP_2"),
+ PINCTRL_PIN(205, "IMGCLKOUT_2"),
+ PINCTRL_PIN(206, "SATA_LEDB"),
+ PINCTRL_PIN(207, "IMGCLKOUT_3"),
+ PINCTRL_PIN(208, "ISH_GP_3"),
+ PINCTRL_PIN(209, "ISH_GP_4"),
+ PINCTRL_PIN(210, "ISH_GP_5"),
+ PINCTRL_PIN(211, "ISH_GP_6"),
+ PINCTRL_PIN(212, "ISH_GP_7"),
+ PINCTRL_PIN(213, "IMGCLKOUT_4"),
+ PINCTRL_PIN(214, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(215, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(216, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(217, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(218, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(219, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(220, "IMGCLKOUT_5"),
+ PINCTRL_PIN(221, "CNV_BRI_DT"),
+ PINCTRL_PIN(222, "CNV_BRI_RSP"),
+ PINCTRL_PIN(223, "CNV_RGI_DT"),
+ PINCTRL_PIN(224, "CNV_RGI_RSP"),
/* GPP_G */
- PINCTRL_PIN(216, "SD3_CMD"),
- PINCTRL_PIN(217, "SD3_D0"),
- PINCTRL_PIN(218, "SD3_D1"),
- PINCTRL_PIN(219, "SD3_D2"),
- PINCTRL_PIN(220, "SD3_D3"),
- PINCTRL_PIN(221, "SD3_CDB"),
- PINCTRL_PIN(222, "SD3_CLK"),
- PINCTRL_PIN(223, "SD3_WP"),
+ PINCTRL_PIN(225, "SD3_CMD"),
+ PINCTRL_PIN(226, "SD3_D0"),
+ PINCTRL_PIN(227, "SD3_D1"),
+ PINCTRL_PIN(228, "SD3_D2"),
+ PINCTRL_PIN(229, "SD3_D3"),
+ PINCTRL_PIN(230, "SD3_CDB"),
+ PINCTRL_PIN(231, "SD3_CLK"),
+ PINCTRL_PIN(232, "SD3_WP"),
};
static const struct intel_padgroup jsl_community0_gpps[] = {
JSL_GPP(0, 0, 19, 320), /* GPP_F */
- JSL_GPP(1, 20, 45, 32), /* GPP_B */
- JSL_GPP(2, 46, 66, 64), /* GPP_A */
- JSL_GPP(3, 67, 74, 96), /* GPP_S */
- JSL_GPP(4, 75, 82, 128), /* GPP_R */
+ JSL_GPP(1, 20, 28, INTEL_GPIO_BASE_NOMAP), /* SPI */
+ JSL_GPP(2, 29, 54, 32), /* GPP_B */
+ JSL_GPP(3, 55, 75, 64), /* GPP_A */
+ JSL_GPP(4, 76, 83, 96), /* GPP_S */
+ JSL_GPP(5, 84, 91, 128), /* GPP_R */
};
static const struct intel_padgroup jsl_community1_gpps[] = {
- JSL_GPP(0, 83, 106, 160), /* GPP_H */
- JSL_GPP(1, 107, 132, 192), /* GPP_D */
- JSL_GPP(2, 133, 161, 224), /* vGPIO */
- JSL_GPP(3, 162, 185, 256), /* GPP_C */
+ JSL_GPP(0, 92, 115, 160), /* GPP_H */
+ JSL_GPP(1, 116, 141, 192), /* GPP_D */
+ JSL_GPP(2, 142, 170, 224), /* vGPIO */
+ JSL_GPP(3, 171, 194, 256), /* GPP_C */
};
static const struct intel_padgroup jsl_community4_gpps[] = {
- JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
- JSL_GPP(1, 192, 215, 288), /* GPP_E */
+ JSL_GPP(0, 195, 200, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ JSL_GPP(1, 201, 224, 288), /* GPP_E */
};
static const struct intel_padgroup jsl_community5_gpps[] = {
- JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */
+ JSL_GPP(0, 225, 232, INTEL_GPIO_BASE_ZERO), /* GPP_G */
};
static const struct intel_community jsl_communities[] = {
- JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps),
- JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps),
- JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps),
- JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps),
+ JSL_COMMUNITY(0, 0, 91, jsl_community0_gpps),
+ JSL_COMMUNITY(1, 92, 194, jsl_community1_gpps),
+ JSL_COMMUNITY(2, 195, 224, jsl_community4_gpps),
+ JSL_COMMUNITY(3, 225, 232, jsl_community5_gpps),
};
static const struct intel_pinctrl_soc_data jsl_soc_data = {
.pm = &jsl_pinctrl_pm_ops,
},
};
-
module_platform_driver(jsl_pinctrl_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PU_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PD_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int host_tagset_enable = 1;
-module_param(host_tagset_enable, int, 0444);
-MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
-
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
return 0;
}
-static int megasas_map_queues(struct Scsi_Host *shost)
-{
- struct megasas_instance *instance;
-
- instance = (struct megasas_instance *)shost->hostdata;
-
- if (shost->nr_hw_queues == 1)
- return 0;
-
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- instance->pdev, instance->low_latency_index_start);
-}
-
static void megasas_aen_polling(struct work_struct *work);
/**
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
- .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
- /* Use shared host tagset only for fusion adaptors
- * if there are managed interrupts (smp affinity enabled case).
- * Single msix_vectors in kdump, so shared host tag is also disabled.
- */
-
- host->host_tagset = 0;
- host->nr_hw_queues = 1;
-
- if ((instance->adapter_type != MFI_SERIES) &&
- (instance->msix_vectors > instance->low_latency_index_start) &&
- host_tagset_enable &&
- instance->smp_affinity_enable) {
- host->host_tagset = 1;
- host->nr_hw_queues = instance->msix_vectors -
- instance->low_latency_index_start;
- }
-
- dev_info(&instance->pdev->dev,
- "Max firmware commands: %d shared with nr_hw_queues = %d\n",
- instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
{
int sdev_busy;
- /* TBD - if sml remove device_busy in future, driver
- * should track counter in internal structure.
- */
- sdev_busy = atomic_read(&scmd->device->device_busy);
+ /* nr_hw_queues = 1 for MegaRAID */
+ struct blk_mq_hw_ctx *hctx =
+ scmd->device->request_queue->queue_hw_ctx[0];
+
+ sdev_busy = atomic_read(&hctx->nr_active);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- } else if (instance->msix_load_balance) {
+ else if (instance->msix_load_balance)
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- } else if (instance->host->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
-
- cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
- instance->low_latency_index_start;
- } else {
+ else
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
- }
}
/**
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
+
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode),
- instance->low_latency_index_start);
+ dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode));
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
_enter(",%s", name);
+ if (fc->source)
+ return invalf(fc, "kAFS: Multiple sources not supported");
+
if (!name) {
printk(KERN_ERR "kAFS: no volume name specified\n");
return -EINVAL;
Choose Y here to disable the use of NFS over UDP. NFS over UDP
on modern networks (1Gb+) can lead to data corruption caused by
fragmentation during high loads.
+
+config NFS_V4_2_READ_PLUS
+ bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation"
+ depends on NFS_V4_2
+ default n
+ help
+ This is intended for developers only. The READ_PLUS operation has
+ been shown to have issues under specific conditions and should not
+ be used in production.
struct nfs_pgio_mirror *pgm;
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_pnfs_ds *ds;
- u32 ds_idx, i;
+ u32 ds_idx;
retry:
ff_layout_pg_check_layout(pgio, req);
goto retry;
}
- for (i = 0; i < pgio->pg_mirror_count; i++) {
- mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
- pgm = &pgio->pg_mirrors[i];
- pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
- }
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
+ pgm = &pgio->pg_mirrors[0];
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
pgio->pg_mirror_idx = ds_idx;
return 1;
}
+static u32
+ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+ u32 old = desc->pg_mirror_idx;
+
+ desc->pg_mirror_idx = idx;
+ return old;
+}
+
+static struct nfs_pgio_mirror *
+ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+ return &desc->pg_mirrors[idx];
+}
+
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
.pg_init = ff_layout_pg_init_read,
.pg_test = pnfs_generic_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
.pg_cleanup = pnfs_generic_pg_cleanup,
+ .pg_get_mirror = ff_layout_pg_get_mirror_write,
+ .pg_set_mirror = ff_layout_pg_set_mirror_write,
};
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
.rpc_resp = &res,
};
u32 xdrlen;
- int ret, np;
+ int ret, np, i;
+ ret = -ENOMEM;
res.scratch = alloc_page(GFP_KERNEL);
if (!res.scratch)
- return -ENOMEM;
+ goto out;
xdrlen = nfs42_listxattr_xdrsize(buflen);
if (xdrlen > server->lxasize)
np = xdrlen / PAGE_SIZE + 1;
pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
- __free_page(res.scratch);
- return -ENOMEM;
+ if (!pages)
+ goto out_free_scratch;
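+	/* Pre-allocate the receive pages instead of using XDRBUF_SPARSE_PAGES */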
+ for (i = 0; i < np; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free_pages;
}
arg.xattr_pages = pages;
*eofp = res.eof;
}
+out_free_pages:
while (--np >= 0) {
if (pages[np])
__free_page(pages[np]);
}
-
- __free_page(res.scratch);
kfree(pages);
-
+out_free_scratch:
+ __free_page(res.scratch);
+out:
return ret;
}
rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count,
hdr.replen);
- req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
encode_nops(&hdr);
}
goto out_stateowner;
set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags);
- set_bit(NFS_OPEN_STATE, &ctx->state->flags);
memcpy(&ctx->state->open_stateid.other, &stateid->other,
NFS4_STATEID_OTHER_SIZE);
update_open_stateid(ctx->state, stateid, NULL, filep->f_mode);
+ set_bit(NFS_OPEN_STATE, &ctx->state->flags);
nfs_file_set_open_context(filep, ctx);
put_nfs_open_context(ctx);
nfs4_read_done_cb(task, hdr);
}
-#ifdef CONFIG_NFS_V4_2
+#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
static void nfs42_read_plus_support(struct nfs_server *server, struct rpc_message *msg)
{
if (server->caps & NFS_CAP_READ_PLUS)
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
+static struct nfs_pgio_mirror *
+nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+ if (desc->pg_ops->pg_get_mirror)
+ return desc->pg_ops->pg_get_mirror(desc, idx);
+ return &desc->pg_mirrors[0];
+}
+
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
- return &desc->pg_mirrors[desc->pg_mirror_idx];
+ return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
+static u32
+nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+ if (desc->pg_ops->pg_set_mirror)
+ return desc->pg_ops->pg_set_mirror(desc, idx);
+ return desc->pg_mirror_idx;
+}
+
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr))
return;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
- mirror = &desc->pg_mirrors[midx];
+ mirror = nfs_pgio_get_mirror(desc, midx);
desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
desc->pg_error);
}
goto out_failed;
}
- desc->pg_mirror_idx = midx;
+ nfs_pgio_set_current_mirror(desc, midx);
if (!nfs_pageio_add_request_mirror(desc, dupreq))
goto out_cleanup_subreq;
}
- desc->pg_mirror_idx = 0;
+ nfs_pgio_set_current_mirror(desc, 0);
if (!nfs_pageio_add_request_mirror(desc, req))
goto out_failed;
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
u32 mirror_idx)
{
- struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
- u32 restore_idx = desc->pg_mirror_idx;
+ struct nfs_pgio_mirror *mirror;
+ u32 restore_idx;
+
+ restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
+ mirror = nfs_pgio_current_mirror(desc);
- desc->pg_mirror_idx = mirror_idx;
for (;;) {
nfs_pageio_doio(desc);
if (desc->pg_error < 0 || !mirror->pg_recoalesce)
if (!nfs_do_recoalesce(desc))
break;
}
- desc->pg_mirror_idx = restore_idx;
+ nfs_pgio_set_current_mirror(desc, restore_idx);
}
/*
u32 midx;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
- mirror = &desc->pg_mirrors[midx];
+ mirror = nfs_pgio_get_mirror(desc, midx);
if (!list_empty(&mirror->pg_list)) {
prev = nfs_list_entry(mirror->pg_list.prev);
if (index != prev->wb_index + 1) {
src = *ppos;
svpfn = src / PM_ENTRY_BYTES;
- start_vaddr = svpfn << PAGE_SHIFT;
end_vaddr = mm->task_size;
/* watch out for wraparound */
- if (svpfn > mm->task_size >> PAGE_SHIFT)
+ start_vaddr = end_vaddr;
+ if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
+ start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
+
+ /* Ensure the address is inside the task */
+ if (start_vaddr > mm->task_size)
start_vaddr = end_vaddr;
/*
ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct seq_file *m = iocb->ki_filp->private_data;
- size_t size = iov_iter_count(iter);
size_t copied = 0;
size_t n;
void *p;
int err = 0;
+ if (!iov_iter_count(iter))
+ return 0;
+
mutex_lock(&m->lock);
/*
if (!m->buf)
goto Enomem;
}
- /* if not empty - flush it first */
+ // something left in the buffer - copy it out first
if (m->count) {
- n = min(m->count, size);
- if (copy_to_iter(m->buf + m->from, n, iter) != n)
- goto Efault;
+ n = copy_to_iter(m->buf + m->from, m->count, iter);
m->count -= n;
m->from += n;
- size -= n;
copied += n;
- if (!size)
+ if (m->count) // hadn't managed to copy everything
goto Done;
}
- /* we need at least one record in buffer */
+ // get a non-empty record in the buffer
m->from = 0;
p = m->op->start(m, &m->index);
while (1) {
err = PTR_ERR(p);
- if (!p || IS_ERR(p))
+ if (!p || IS_ERR(p)) // EOF or an error
break;
err = m->op->show(m, p);
- if (err < 0)
+ if (err < 0) // hard error
break;
- if (unlikely(err))
+ if (unlikely(err)) // ->show() says "skip it"
m->count = 0;
- if (unlikely(!m->count)) {
+ if (unlikely(!m->count)) { // empty record
p = m->op->next(m, p, &m->index);
continue;
}
- if (m->count < m->size)
+ if (!seq_has_overflowed(m)) // got it
goto Fill;
+ // need a bigger buffer
m->op->stop(m, p);
kvfree(m->buf);
m->count = 0;
goto Enomem;
p = m->op->start(m, &m->index);
}
+ // EOF or an error
m->op->stop(m, p);
m->count = 0;
goto Done;
Fill:
- /* they want more? let's try to get some more */
+ // one non-empty record is in the buffer; if they want more,
+ // try to fit more in, but in any case we need to advance
+ // the iterator once for every record shown.
while (1) {
size_t offs = m->count;
loff_t pos = m->index;
m->op->next);
m->index++;
}
- if (!p || IS_ERR(p)) {
- err = PTR_ERR(p);
+ if (!p || IS_ERR(p)) // no next record for us
break;
- }
- if (m->count >= size)
+ if (m->count >= iov_iter_count(iter))
break;
err = m->op->show(m, p);
- if (seq_has_overflowed(m) || err) {
+ if (err > 0) { // ->show() says "skip it"
m->count = offs;
- if (likely(err <= 0))
- break;
+ } else if (err || seq_has_overflowed(m)) {
+ m->count = offs;
+ break;
}
}
m->op->stop(m, p);
- n = min(m->count, size);
- if (copy_to_iter(m->buf, n, iter) != n)
- goto Efault;
+ n = copy_to_iter(m->buf, m->count, iter);
copied += n;
m->count -= n;
m->from = n;
Done:
- if (!copied)
- copied = err;
- else {
+ if (unlikely(!copied)) {
+ copied = m->count ? -EFAULT : err;
+ } else {
iocb->ki_pos += copied;
m->read_pos += copied;
}
Enomem:
err = -ENOMEM;
goto Done;
-Efault:
- err = -EFAULT;
- goto Done;
}
EXPORT_SYMBOL(seq_read_iter);
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#ifdef __GENKSYMS__
+/* genksyms gets confused by _Static_assert */
+#define _Static_assert(expr, ...)
+#endif
+
#endif /* _LINUX_BUILD_BUG_H */
#endif
}
+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+ return 0;
+}
+#endif
#endif /* _LINUX_ELFCORE_H */
unsigned int valid_hooks;
/* Man behind the curtain... */
- struct xt_table_info *private;
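+	/* Dereference with xt_table_get_private_protected() or under RCU */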
+ struct xt_table_info __rcu *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+struct xt_table_info
+*xt_table_get_private_protected(const struct xt_table *table);
+
#ifdef CONFIG_COMPAT
#include <net/compat.h>
unsigned short wb_nio; /* Number of I/O attempts */
};
+struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+ struct nfs_pgio_mirror *
+ (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+ u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
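+	/* If nonzero, overrides the ADDR64 width from MAC_HW_Feature1 */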
+ u32 addr64;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
-#ifdef CONFIG_XFRM_OFFLOAD
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
-#endif /* CONFIG_XFRM_OFFLOAD */
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
void nft_chain_route_fini(void);
void nf_tables_trans_destroy_flush_work(void);
+
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+__be64 nf_jiffies64_to_msecs(u64 input);
+
#endif /* _NET_NF_TABLES_H */
};
struct netdev_bpf;
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
+ /* Switches like VSC9959 have flooding per traffic class */
+ int num_flooding_pgids;
+
/* In tables like ANA:PORT and the ANA:PGID:PGID mask,
* the CPU is located after the physical ports (at the
* num_phys_ports index).
FN(seq_printf_btf), \
FN(skb_cgroup_classid), \
FN(redirect_neigh), \
- FN(bpf_per_cpu_ptr), \
- FN(bpf_this_cpu_ptr), \
+ FN(per_cpu_ptr), \
+ FN(this_cpu_ptr), \
FN(redirect_peer), \
/* */
#include <linux/initrd.h>
#include <linux/kexec.h>
-void __weak free_initrd_mem(unsigned long start, unsigned long end)
+void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_ELFCORE) += elfcore.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_TRACE_CLOCK) += trace/
return &bpf_snprintf_btf_proto;
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
- case BPF_FUNC_bpf_per_cpu_ptr:
+ case BPF_FUNC_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
- case BPF_FUNC_bpf_this_cpu_ptr:
+ case BPF_FUNC_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
break;
static bool __reg64_bound_s32(s64 a)
{
- if (a > S32_MIN && a < S32_MAX)
- return true;
- return false;
+ return a > S32_MIN && a < S32_MAX;
}
static bool __reg64_bound_u32(u64 a)
{
__mark_reg32_unbounded(reg);
- if (__reg64_bound_s32(reg->smin_value))
+ if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
reg->s32_min_value = (s32)reg->smin_value;
- if (__reg64_bound_s32(reg->smax_value))
reg->s32_max_value = (s32)reg->smax_value;
+ }
if (__reg64_bound_u32(reg->umin_value))
reg->u32_min_value = (u32)reg->umin_value;
if (__reg64_bound_u32(reg->umax_value))
ret_reg->smax_value = meta->msize_max_value;
ret_reg->s32_max_value = meta->msize_max_value;
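+	/* A size-returning helper can also fail with a negative errno */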
+ ret_reg->smin_value = -MAX_ERRNO;
+ ret_reg->s32_min_value = -MAX_ERRNO;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
__update_reg_bounds(ret_reg);
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/binfmts.h>
-#include <linux/elfcore.h>
-
-Elf_Half __weak elf_core_extra_phdrs(void)
-{
- return 0;
-}
-
-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
- return 1;
-}
-
-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
-{
- return 1;
-}
-
-size_t __weak elf_core_extra_data_size(void)
-{
- return 0;
-}
return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
case BPF_FUNC_snprintf_btf:
return &bpf_snprintf_btf_proto;
- case BPF_FUNC_bpf_per_cpu_ptr:
+ case BPF_FUNC_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
- case BPF_FUNC_bpf_this_cpu_ptr:
+ case BPF_FUNC_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
return NULL;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
* two. They are not that meaningful.
*/
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
- ftrace_trace_userstack(buffer, flags, pc);
+ ftrace_trace_userstack(tr, buffer, flags, pc);
}
/*
static DEFINE_PER_CPU(int, user_stack_count);
static void
-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
unsigned long flags, int pc)
{
}
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
# get appended last to CFLAGS and thus override those previous compiler options.
#
-FPU_CFLAGS := -mhard-float -msse -msse2
+FPU_CFLAGS := -msse -msse2
ifdef CONFIG_CC_IS_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# -mpreferred-stack-boundary=3 is not between 4 and 12
#
# can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += -mhard-float
FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
endif
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
-static noinline int __add_to_page_cache_locked(struct page *page,
+noinline int __add_to_page_cache_locked(struct page *page,
struct address_space *mapping,
pgoff_t offset, gfp_t gfp,
void **shadowp)
}
set_compound_order(page, 0);
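+	/* set_compound_order() does not clear compound_nr; clear it explicitly */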
+ page[1].compound_nr = 0;
__ClearPageHead(page);
}
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/cpuhotplug.h>
#include "../slab.h"
#include "kasan.h"
struct qlist_node *head;
struct qlist_node *tail;
size_t bytes;
+ bool offline;
};
#define QLIST_INIT { NULL, NULL, 0 }
local_irq_save(flags);
q = this_cpu_ptr(&cpu_quarantine);
+ if (q->offline) {
+ local_irq_restore(flags);
+ return;
+ }
qlist_put(q, &info->quarantine_link, cache->size);
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
synchronize_srcu(&remove_cache_srcu);
}
+
+static int kasan_cpu_online(unsigned int cpu)
+{
+ this_cpu_ptr(&cpu_quarantine)->offline = false;
+ return 0;
+}
+
+static int kasan_cpu_offline(unsigned int cpu)
+{
+ struct qlist_head *q;
+
+ q = this_cpu_ptr(&cpu_quarantine);
+ /* Ensure the ordering between the write to q->offline and
+ * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
+ * by an interrupt.
+ */
+ WRITE_ONCE(q->offline, true);
+ barrier();
+ qlist_free_all(q, NULL);
+ return 0;
+}
+
+static int __init kasan_cpu_quarantine_init(void)
+{
+ int ret = 0;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
+ kasan_cpu_online, kasan_cpu_offline);
+ if (ret < 0)
+ pr_err("kasan cpu quarantine register failed [%d]\n", ret);
+ return ret;
+}
+late_initcall(kasan_cpu_quarantine_init);
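For reference, a minimal sketch of the dynamic CPU-hotplug registration pattern the quarantine code uses; the demo_* names are illustrative only. Note that cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN also invokes the online callback on every CPU that is already up, so the offline flag starts out cleared everywhere.

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int demo_cpu_online(unsigned int cpu)
{
	/* re-arm this CPU's per-cpu state */
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	/* quiesce and drain this CPU's per-cpu state */
	return 0;
}

static int __init demo_hotplug_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state slot (>= 0) */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}
late_initcall(demo_hotplug_init);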
goto put_pid;
}
- if (task->mm != current->mm &&
- !process_madvise_behavior_valid(behavior)) {
+ if (!process_madvise_behavior_valid(behavior)) {
ret = -EINVAL;
goto release_task;
}
br_stp_enable_bridge(br);
br_multicast_open(br);
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_join_snoopers(br);
+
return 0;
}
br_stp_disable_bridge(br);
br_multicast_stop(br);
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_leave_snoopers(br);
+
netif_stop_queue(dev);
return 0;
}
#endif
-static void br_multicast_join_snoopers(struct net_bridge *br)
+void br_multicast_join_snoopers(struct net_bridge *br)
{
br_ip4_multicast_join_snoopers(br);
br_ip6_multicast_join_snoopers(br);
}
#endif
-static void br_multicast_leave_snoopers(struct net_bridge *br)
+void br_multicast_leave_snoopers(struct net_bridge *br)
{
br_ip4_multicast_leave_snoopers(br);
br_ip6_multicast_leave_snoopers(br);
void br_multicast_open(struct net_bridge *br)
{
- if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
- br_multicast_join_snoopers(br);
-
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_own_query);
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
-
- if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
- br_multicast_leave_snoopers(br);
}
void br_multicast_dev_del(struct net_bridge *br)
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
struct net_bridge_port *port;
+ bool change_snoopers = false;
spin_lock_bh(&br->multicast_lock);
if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
br_mc_disabled_update(br->dev, val);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
- br_multicast_leave_snoopers(br);
+ change_snoopers = true;
goto unlock;
}
list_for_each_entry(port, &br->port_list, list)
__br_multicast_enable_port(port);
+ change_snoopers = true;
+
unlock:
spin_unlock_bh(&br->multicast_lock);
+ /* br_multicast_join_snoopers can cause an MLD Report/Leave
+ * to be delivered to br_multicast_rcv, which would in turn call
+ * br_multicast_add_group and attempt to acquire multicast_lock.
+ * It must therefore be called after the lock has been released
+ * to avoid a deadlock on multicast_lock.
+ *
+ * br_multicast_leave_snoopers does not have this problem since
+ * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED and
+ * returns without calling br_multicast_ipv4/6_rcv if it's not
+ * enabled. Both calls are moved out of the lock just for symmetry.
+ */
+ if (change_snoopers) {
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_join_snoopers(br);
+ else
+ br_multicast_leave_snoopers(br);
+ }
+
return 0;
}
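The hazard the comment above describes, sketched as a call chain (reconstructed from the functions the comment names, not taken from a trace):

/*
 * br_multicast_toggle()
 *   spin_lock_bh(&br->multicast_lock)
 *   br_multicast_join_snoopers()           <- old placement, under the lock
 *     -> MLD report delivered to br_multicast_rcv()
 *       -> br_multicast_add_group()
 *         -> spin_lock_bh(&br->multicast_lock)   <- same lock: deadlock
 */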
void br_multicast_enable_port(struct net_bridge_port *port);
void br_multicast_disable_port(struct net_bridge_port *port);
void br_multicast_init(struct net_bridge *br);
+void br_multicast_join_snoopers(struct net_bridge *br);
+void br_multicast_leave_snoopers(struct net_bridge *br);
void br_multicast_open(struct net_bridge *br);
void br_multicast_stop(struct net_bridge *br);
void br_multicast_dev_del(struct net_bridge *br);
{
}
+static inline void br_multicast_join_snoopers(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_leave_snoopers(struct net_bridge *br)
+{
+}
+
static inline void br_multicast_open(struct net_bridge *br)
{
}
}
masterv = br_vlan_get_master(br, v->vid, extack);
- if (!masterv)
+ if (!masterv) {
+ err = -ENOMEM;
goto out_filt;
+ }
v->brvlan = masterv;
if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
if (level != SOL_CAN_ISOTP)
return -EINVAL;
+ if (so->bound)
+ return -EISCONN;
+
switch (optname) {
case CAN_ISOTP_OPTS:
if (optlen != sizeof(struct can_isotp_options))
return dev->xdp_state[mode].prog;
}
+static u8 dev_xdp_prog_count(struct net_device *dev)
+{
+ u8 count = 0;
+ int i;
+
+ for (i = 0; i < __MAX_XDP_MODE; i++)
+ if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
+ count++;
+ return count;
+}
+
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
struct bpf_xdp_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog, u32 flags)
{
+ unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
struct bpf_prog *cur_prog;
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
return -EINVAL;
}
- /* just one XDP mode bit should be set, zero defaults to SKB mode */
- if (hweight32(flags & XDP_FLAGS_MODES) > 1) {
+ /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
+ if (num_modes > 1) {
NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
return -EINVAL;
}
+ /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
+ if (!num_modes && dev_xdp_prog_count(dev) > 1) {
+ NL_SET_ERR_MSG(extack,
+ "More than one program loaded, unset mode is ambiguous");
+ return -EINVAL;
+ }
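hweight32() is a population count, so the two checks above distinguish "exactly one mode", "more than one mode", and "no mode while multiple programs are attached". For illustration, using the UAPI flag names:

/*
 * hweight32(x) = number of set bits in x:
 *   hweight32(XDP_FLAGS_DRV_MODE)                      == 1  -> valid
 *   hweight32(XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE) == 2  -> rejected
 *   hweight32(0)                                       == 0  -> mode unset;
 *     rejected when dev_xdp_prog_count(dev) > 1 (ambiguous target)
 */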
/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
if (this->release == release &&
- this->indr.cb_priv == cb_priv) {
+ this->indr.cb_priv == cb_priv)
list_move(&this->indr.list, cleanup_list);
- return;
- }
}
}
{
int ret;
- /* Preempt disable is needed to protect per-cpu redirect_info between
- * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
- * access to maps strictly require a rcu_read_lock() for protection,
- * mixing with BH RCU lock doesn't work.
+ /* Migration disable and BH disable are needed to protect per-cpu
+ * redirect_info between BPF prog and skb_do_redirect().
*/
- preempt_disable();
+ migrate_disable();
+ local_bh_disable();
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
break;
}
- preempt_enable();
+ local_bh_enable();
+ migrate_enable();
return ret;
}
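A note on the pairing above: local_bh_disable() fences off the softirq path, which also uses the per-cpu redirect_info scratch area, while migrate_disable() keeps the task on one CPU even on PREEMPT_RT, where BH-disabled code remains preemptible. The bracketing pattern, as a sketch:

	migrate_disable();	/* pin the task to this CPU (RT-friendly) */
	local_bh_disable();	/* fence off softirq users of the per-cpu
				 * redirect_info scratch area */

	/* ... bpf_prog_run_save_cb() / skb_do_redirect() ... */

	local_bh_enable();
	migrate_enable();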
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolean
* is used for those call sites, thus allowing for faster recycling
- * of xdp_frames/pages in those cases. This path is never used by the
- * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
- * the switch-statement.
+ * of xdp_frames/pages in those cases.
*/
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp)
{
struct xdp_mem_allocator *xa;
struct page *page;
page = virt_to_page(data); /* Assumes order-0 page */
put_page(page);
break;
+ case MEM_TYPE_XSK_BUFF_POOL:
+ /* NB! Only valid from an xdp_buff! */
+ xsk_buff_free(xdp);
+ break;
default:
/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
void xdp_return_frame(struct xdp_frame *xdpf)
{
- __xdp_return(xdpf->data, &xdpf->mem, false);
+ __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
- __xdp_return(xdpf->data, &xdpf->mem, true);
+ __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp)
{
- __xdp_return(xdp->data, &xdp->rxq->mem, true);
+ __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
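Summarizing the new @xdp argument across the three return paths, as taken directly from the hunks above:

/*
 * xdp_return_frame()         -> __xdp_return(..., napi_direct=false, NULL)
 * xdp_return_frame_rx_napi() -> __xdp_return(..., napi_direct=true,  NULL)
 * xdp_return_buff()          -> __xdp_return(..., napi_direct=true,  xdp)
 *
 * Only xdp_return_buff() can carry MEM_TYPE_XSK_BUFF_POOL memory, which
 * is why that case is marked "Only valid from an xdp_buff!".
 */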
/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf)
-{
- if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
- NL_SET_ERR_MSG(bpf->extack,
- "program loaded with different flags");
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
-
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf)
{
return ret;
change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
+ if (change_bits > nbits)
+ change_bits = nbits;
bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
change_bits);
if (change_bits < nbits)
if (has_gw && has_via) {
NL_SET_ERR_MSG(extack,
"Nexthop configuration can not contain both GATEWAY and VIA");
- goto errout;
+ return -EINVAL;
}
return 0;
local_bh_disable();
addend = xt_write_recseq_begin();
- private = READ_ONCE(table->private); /* Address dependency. */
+ private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
unsigned int off, num;
const struct arpt_entry *e;
struct xt_counters *counters;
- struct xt_table_info *private = table->private;
+ struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
void *loc_cpu_entry;
t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
if (!IS_ERR(t)) {
struct arpt_getinfo info;
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
}
local_bh_disable();
- private = t->private;
+ private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
- private = READ_ONCE(table->private); /* Address dependency. */
+ private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
unsigned int off, num;
const struct ipt_entry *e;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
const void *loc_cpu_entry;
t = xt_request_find_table_lock(net, AF_INET, name);
if (!IS_ERR(t)) {
struct ipt_getinfo info;
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
}
local_bh_disable();
- private = t->private;
+ private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_sndbuf_expand(sk);
- tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_jiffies32;
+ tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
+ (u32)TCP_INIT_CWND * tp->advmss);
}
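A quick worked example of the new clamp, with illustrative numbers:

/*
 * advmss = 1460, TCP_INIT_CWND = 10 -> 10 * 1460        = 14600
 * rcv_wnd                           -> say                65535
 * rcv_ssthresh (after the min above)-> say                14600
 * min3(...)                         -> rcvq_space.space = 14600 bytes
 *
 * Previously rcv_ssthresh was not considered, so rcvq_space.space could
 * start larger than the window we are actually willing to advertise.
 */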
/* 4. Recalculate window clamp after socket hit its memory bounds. */
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
- tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
+ (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+ (inet_sk(sk)->tos & INET_ECN_MASK) :
inet_sk(sk)->tos;
if (!INET_ECN_is_capable(tos) &&
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = prandom_u32();
- /* Set ToS of the new socket based upon the value of incoming SYN. */
+ /* Set ToS of the new socket based upon the value of the incoming SYN.
+ * ECT bits are set later in tcp_init_transfer().
+ */
if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
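The reflected-ToS hunks above keep the DSCP from the peer's SYN while preserving the local ECN bits; for reference, the bit split (INET_ECN_MASK is 0x3):

/*
 * tos/tclass layout: upper six bits DSCP, lower two bits ECN.
 *
 *   reflected = (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK)    DSCP from SYN
 *             | (inet_sk(sk)->tos      &  INET_ECN_MASK);   ECN from listener
 */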
* window, and remember whether we were cwnd-limited then.
*/
if (!before(tp->snd_una, tp->max_packets_seq) ||
- tp->packets_out > tp->max_packets_out) {
+ tp->packets_out > tp->max_packets_out ||
+ is_cwnd_limited) {
tp->max_packets_out = tp->packets_out;
tp->max_packets_seq = tp->snd_nxt;
tp->is_cwnd_limited = is_cwnd_limited;
else
tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
+ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+ if (likely(sent_pkts || is_cwnd_limited))
+ tcp_cwnd_validate(sk, is_cwnd_limited);
+
if (likely(sent_pkts)) {
if (tcp_in_cwnd_reduction(sk))
tp->prr_out += sent_pkts;
/* Send one loss probe per tail loss episode. */
if (push_one != 2)
tcp_schedule_loss_probe(sk, false);
- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
- tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
}
return !tp->packets_out && !tcp_write_queue_empty(sk);
__skb_pull(skb, skb_transport_offset(skb));
ret = udp_queue_rcv_one_skb(sk, skb);
if (ret > 0)
- ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+ ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
}
return 0;
}
local_bh_disable();
addend = xt_write_recseq_begin();
- private = READ_ONCE(table->private); /* Address dependency. */
+ private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
unsigned int off, num;
const struct ip6t_entry *e;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
const void *loc_cpu_entry;
t = xt_request_find_table_lock(net, AF_INET6, name);
if (!IS_ERR(t)) {
struct ip6t_getinfo info;
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR(t)) {
- struct xt_table_info *private = t->private;
+ struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
}
local_bh_disable();
- private = t->private;
+ private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
- tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
+ (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+ (np->tclass & INET_ECN_MASK) :
np->tclass;
if (!INET_ECN_is_capable(tclass) &&
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
- /* Set ToS of the new socket based upon the value of incoming SYN. */
+ /* Set ToS of the new socket based upon the value of the incoming SYN.
+ * ECT bits are set later in tcp_init_transfer().
+ */
if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
return ret;
}
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
if (ret) {
kfree(sdata);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
+ rhashtable_init(&newtbl->rhead, &mesh_rht_params);
return newtbl;
}
goto free_path;
}
- rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
-
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;
*chandef = he_chandef;
- return false;
+ return true;
}
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
for (i = 0; mptcp_snmp_list[i].name; i++)
seq_puts(seq, " 0");
+ seq_putc(seq, '\n');
return;
}
}
nla_strlcpy(ifname, attr, IFNAMSIZ);
+ /* nf_tables_netdev_event() is called under rtnl_mutex; this
+ * indirectly serializes all the other holders of the commit_mutex
+ * with the rtnl_mutex.
+ */
dev = __dev_get_by_name(net, ifname);
if (!dev) {
err = -ENOENT;
return 0;
}
-static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
{
u64 ms = be64_to_cpu(nla_get_be64(nla));
u64 max = (u64)(~((u64)0));
return 0;
}
-static __be64 nf_jiffies64_to_msecs(u64 input)
+__be64 nf_jiffies64_to_msecs(u64 input)
{
return cpu_to_be64(jiffies64_to_msecs(input));
}
}
#endif
case NFT_CT_ID:
- if (!nf_ct_is_confirmed(ct))
- goto err;
*dest = nf_ct_get_id(ct);
return;
default:
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
- tb[NFTA_DYNSET_TIMEOUT])));
+
+ err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
+ if (err)
+ return err;
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
- cpu_to_be64(jiffies_to_msecs(priv->timeout)),
+ nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
}
EXPORT_SYMBOL(xt_counters_alloc);
+struct xt_table_info *
+xt_table_get_private_protected(const struct xt_table *table)
+{
+ return rcu_dereference_protected(table->private,
+ mutex_is_locked(&xt[table->af].mutex));
+}
+EXPORT_SYMBOL(xt_table_get_private_protected);
+
struct xt_table_info *
xt_replace_table(struct xt_table *table,
unsigned int num_counters,
int *error)
{
struct xt_table_info *private;
- unsigned int cpu;
int ret;
ret = xt_jumpstack_alloc(newinfo);
}
/* Do the substitution. */
- local_bh_disable();
- private = table->private;
+ private = xt_table_get_private_protected(table);
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
- local_bh_enable();
*error = -EAGAIN;
return NULL;
}
newinfo->initial_entries = private->initial_entries;
- /*
- * Ensure contents of newinfo are visible before assigning to
- * private.
- */
- smp_wmb();
- table->private = newinfo;
-
- /* make sure all cpus see new ->private value */
- smp_wmb();
- /*
- * Even though table entries have now been swapped, other CPU's
- * may still be using the old entries...
- */
- local_bh_enable();
-
- /* ... so wait for even xt_recseq on all cpus */
- for_each_possible_cpu(cpu) {
- seqcount_t *s = &per_cpu(xt_recseq, cpu);
- u32 seq = raw_read_seqcount(s);
-
- if (seq & 1) {
- do {
- cond_resched();
- cpu_relax();
- } while (seq == raw_read_seqcount(s));
- }
- }
+ rcu_assign_pointer(table->private, newinfo);
+ synchronize_rcu();
audit_log_nfcfg(table->name, table->af, private->number,
!private->number ? AUDIT_XT_OP_REGISTER :
}
/* Simplifies replace_table code. */
- table->private = bootstrap;
+ rcu_assign_pointer(table->private, bootstrap);
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;
- private = table->private;
+ private = xt_table_get_private_protected(table);
pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
struct xt_table_info *private;
mutex_lock(&xt[table->af].mutex);
- private = table->private;
+ private = xt_table_get_private_protected(table);
+ RCU_INIT_POINTER(table->private, NULL);
list_del(&table->list);
mutex_unlock(&xt[table->af].mutex);
audit_log_nfcfg(table->name, table->af, private->number,
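The xtables hunks above replace the seqcount-based wait with the standard RCU publish-and-wait pattern. A generic sketch of that pattern under hypothetical foo names (not the patch itself):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int dummy; };

static DEFINE_MUTEX(foo_mutex);
static struct foo __rcu *active_foo;	/* updates serialized by foo_mutex */

static void swap_foo(struct foo *new)
{
	struct foo *old;

	old = rcu_dereference_protected(active_foo,
					lockdep_is_held(&foo_mutex));
	rcu_assign_pointer(active_foo, new);	/* publish the new table */
	synchronize_rcu();			/* wait out readers of @old */
	kfree(old);				/* safe to harvest/free now */
}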
action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log);
if (action_start < 0)
- return start;
+ return action_start;
err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
vlan_tci, mpls_label_count, log);
return err;
}
if (lse_mask->mpls_label) {
- err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
- lse_key->mpls_label);
+ err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+ lse_key->mpls_label);
if (err)
return err;
}
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
+ timer_setup(&q->adapt_timer, fq_pie_timer, 0);
if (opt) {
err = fq_pie_change(sch, opt, extack);
pie_vars_init(&flow->vars);
}
- timer_setup(&q->adapt_timer, fq_pie_timer, 0);
mod_timer(&q->adapt_timer, jiffies + HZ / 2);
return 0;
&xmitq);
else if (prop == TIPC_NLA_PROP_MTU)
tipc_link_set_mtu(e->link, b->mtu);
+
+ /* Update MTU for node link entry */
+ e->mtu = tipc_link_mss(e->link);
}
- /* Update MTU for node link entry */
- e->mtu = tipc_link_mss(e->link);
+
tipc_node_write_unlock(n);
tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
}
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct nlattr *tb[NUM_NL80211_REKEY_DATA];
- struct cfg80211_gtk_rekey_data rekey_data;
+ struct cfg80211_gtk_rekey_data rekey_data = {};
int err;
if (!info->attrs[NL80211_ATTR_REKEY_DATA])
return 0;
}
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+ if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+ return false;
+
+ return true;
+}
+
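The new helper only reports the TX ring as writeable while it is at most half full, so sk_write_space()/EPOLLOUT wakeups stop once the consumer falls behind. With illustrative numbers:

/*
 * Example with nentries = 2048:
 *   xskq_cons_present_entries(tx) <= 1024 -> writeable, EPOLLOUT set
 *   xskq_cons_present_entries(tx)  > 1024 -> not writeable, no wakeup
 */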
static bool xsk_is_bound(struct xdp_sock *xs)
{
if (READ_ONCE(xs->state) == XSK_BOUND) {
rcu_read_lock();
list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
__xskq_cons_release(xs->tx);
- xs->sk.sk_write_space(&xs->sk);
+ if (xsk_tx_writeable(xs))
+ xs->sk.sk_write_space(&xs->sk);
}
rcu_read_unlock();
}
out:
if (sent_frame)
- sk->sk_write_space(sk);
+ if (xsk_tx_writeable(xs))
+ sk->sk_write_space(sk);
mutex_unlock(&xs->mutex);
return err;
static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
+ __poll_t mask = 0;
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
struct xsk_buff_pool *pool;
+ sock_poll_wait(file, sock, wait);
+
if (unlikely(!xsk_is_bound(xs)))
return mask;
if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
- if (xs->tx && !xskq_cons_is_full(xs->tx))
+ if (xs->tx && xsk_tx_writeable(xs))
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
if (!pool->dma_pages) {
WARN(1, "Driver did not DMA map zero-copy buffers");
+ err = -EINVAL;
goto err_unreg_xsk;
}
pool->umem->zc = true;
q->nentries;
}
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+ /* No barriers needed since data is not accessed */
+ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
/* Functions for producers */
static inline bool xskq_prod_is_full(struct xsk_queue *q)
case XFRMA_PAD:
/* Ignore */
return 0;
+ case XFRMA_UNSPEC:
case XFRMA_ALG_AUTH:
case XFRMA_ALG_CRYPT:
case XFRMA_ALG_COMP:
memcpy(nla, src, nla_attr_size(copy_len));
nla->nla_len = nla_attr_size(payload);
- *pos += nla_attr_size(payload);
+ *pos += nla_attr_size(copy_len);
nlmsg->nlmsg_len += nla->nla_len;
memset(dst + *pos, 0, payload - copy_len);
return NULL;
len += NLMSG_HDRLEN;
- h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO);
+ h64 = kvmalloc(len, GFP_KERNEL);
if (!h64)
return ERR_PTR(-ENOMEM);
if (in_compat_syscall()) {
struct xfrm_translator *xtr = xfrm_get_translator();
- if (!xtr)
+ if (!xtr) {
+ kfree(data);
return -EOPNOTSUPP;
+ }
err = xtr->xlate_user_policy_sockptr(&data, optlen);
xfrm_put_translator(xtr);
int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
{
- char buf[4096];
- struct pid_iter_bpf *skel;
struct pid_iter_entry *e;
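+ /* size buf to a whole number of entries so a read boundary never splits one */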
+ char buf[4096 / sizeof(*e) * sizeof(*e)];
+ struct pid_iter_bpf *skel;
int err, ret, fd = -1, i;
libbpf_print_fn_t default_print;
FN(seq_printf_btf), \
FN(skb_cgroup_classid), \
FN(redirect_neigh), \
- FN(bpf_per_cpu_ptr), \
- FN(bpf_this_cpu_ptr), \
+ FN(per_cpu_ptr), \
+ FN(this_cpu_ptr), \
FN(redirect_peer), \
/* */
err = ringbuf_process_ring(ring);
if (err < 0)
return err;
- res += cnt;
+ res += err;
}
return cnt < 0 ? -errno : res;
}
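For context, ring_buffer__poll() returns the number of records consumed (or a negative error), which is why the fix accumulates err rather than the epoll count. A minimal libbpf consumer, assuming a caller-provided map_fd and a trivial handle_event callback:

#include <errno.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t len)
{
	return 0;	/* 0 = record consumed successfully */
}

int consume_once(int map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;

	/* number of records consumed, or a negative error */
	n = ring_buffer__poll(rb, 100 /* timeout in ms */);

	ring_buffer__free(rb);
	return n;
}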
if ($reboot_type eq "grub") {
run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
- } elsif ($reboot_type eq "grub2") {
+ } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
run_ssh "$grub_reboot $grub_number";
} elsif ($reboot_type eq "syslinux") {
run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
*/
{7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
- {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+ {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
if (CHECK(err, "join_bg", "err %d\n", err))
goto cleanup;
- if (CHECK(bg_ret != 1, "bg_ret", "epoll_wait result: %ld", bg_ret))
+ if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
goto cleanup;
+ /* due to timing variations, there could still be non-notified
+ * samples, so consume them here to collect all the samples
+ */
+ err = ring_buffer__consume(ringbuf);
+ CHECK(err < 0, "rb_consume", "failed: %d\n", err);
+
/* 3 rounds, 2 samples each */
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
/* poll for samples, should get 2 ringbufs back */
err = ring_buffer__poll(ringbuf, -1);
- if (CHECK(err != 4, "poll_res", "expected 4 records, got %d\n", err))
+ if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
goto cleanup;
/* expect extra polling to return nothing */
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
# Remove the base maps
- for m in base_maps:
- if m in maps:
- maps.remove(m)
+ maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names]
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
fail(ret == 0, "Replaced one of programs without -force")
check_extack(err, "XDP program already attached.", args)
- if modename == "" or modename == "drv":
- othermode = "" if modename == "drv" else "drv"
- start_test("Test multi-attachment XDP - detach...")
- ret, _, err = sim.unset_xdp(othermode, force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode")
- check_extack(err, "program loaded with different flags.", args)
+ start_test("Test multi-attachment XDP - remove without mode...")
+ ret, _, err = sim.unset_xdp("", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program without a mode flag")
+ check_extack(err, "More than one program loaded, unset mode is ambiguous.", args)
sim.unset_xdp("offload")
xdp = sim.ip_link_show(xdp=True)["xdp"]
skip(ret != 0, "bpftool not installed")
base_progs = progs
_, base_maps = bpftool("map")
+base_map_names = [
+ 'pid_iter.rodata' # created on each bpftool invocation
+]
# Check netdevsim
ret, out = cmd("modprobe netdevsim", fail=False)
sim.tc_flush_filters()
+ start_test("Test TC offloads failure...")
+ sim.dfs["dev/bpf_bind_verifier_accept"] = 0
+ ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "TC filter did not reject with TC offloads enabled")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+ sim.dfs["dev/bpf_bind_verifier_accept"] = 1
+
start_test("Test TC offloads work...")
ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
fail=False, include_stderr=True)
fail(ret != 0, "TC filter did not load with TC offloads enabled")
- check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test TC offload basics...")
dfs = simdev.dfs_get_bound_progs(expected=1)
start_test("Test disabling TC offloads is rejected while filters installed...")
ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+ sim.set_ethtool_tc_offloads(True)
start_test("Test qdisc removal frees things...")
sim.tc_flush_filters()
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
check_extack(err,
- "native and generic XDP can't be active at the same time.",
+ "Native and generic XDP can't be active at the same time.",
args)
- ret, _, err = sim.set_xdp(obj, "", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Replaced XDP program with a program in different mode")
- check_extack(err, "program loaded with different flags.", args)
-
- start_test("Test XDP prog remove with bad flags...")
- ret, _, err = sim.unset_xdp("", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode")
- check_extack(err, "program loaded with different flags.", args)
start_test("Test MTU restrictions...")
ret, _ = sim.set_mtu(9000, fail=False)
offload = bpf_pinned("/sys/fs/bpf/offload")
ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
fail(ret == 0, "attached offloaded XDP program to drv")
- check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
+ check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args)
rm("/sys/fs/bpf/offload")
sim.wait_for_flush()
+ start_test("Test XDP load failure...")
+ sim.dfs["dev/bpf_bind_verifier_accept"] = 0
+ ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
+ dev=sim['ifname'], fail=False, include_stderr=True)
+ fail(ret == 0, "verifier should fail on load")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+ sim.dfs["dev/bpf_bind_verifier_accept"] = 1
+ sim.wait_for_flush()
+
start_test("Test XDP offload...")
_, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
ipl = sim.ip_link_show(xdp=True)
progs = bpftool_prog_list(expected=1)
prog = progs[0]
fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
- check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test XDP offload is device bound...")
dfs = simdev.dfs_get_bound_progs(expected=1)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
+{
+ "bounds checks after 32-bit truncation. test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ /* This used to reduce the max bound to 0x7fffffff */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "bounds checks after 32-bit truncation. test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
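These new cases pin down why a 32-bit conditional jump must not tighten the 64-bit bounds; a minimal numeric illustration:

/*
 * r1 = 0x100000000   -> 64-bit value with only bit 32 set
 * (u32)r1 == 0       -> the 32-bit truncation drops bit 32
 *
 * A passing 32-bit check such as "w1 s< 0" (BPF_JMP32) therefore
 * constrains only the low 32 bits and proves nothing about r1's
 * full 64-bit range.
 */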
fi
}
+setup_cmd_nsc()
+{
+ local cmd="$*"
+ local rc
+
+ run_cmd_nsc ${cmd}
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ # show user the command if not done so already
+ if [ "$VERBOSE" = "0" ]; then
+ echo "setup command: $cmd"
+ fi
+ echo "failed. stopping tests"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue"
+ read a
+ fi
+ exit $rc
+ fi
+}
+
# set sysctl values in NS-A
set_sysctl()
{
sleep 1
}
+setup_lla_only()
+{
+ # make sure we are starting with a clean slate
+ kill_procs
+ cleanup 2>/dev/null
+
+ log_debug "Configuring network namespaces"
+ set -e
+
+ create_ns ${NSA} "-" "-"
+ create_ns ${NSB} "-" "-"
+ create_ns ${NSC} "-" "-"
+ connect_ns ${NSA} ${NSA_DEV} "-" "-" \
+ ${NSB} ${NSB_DEV} "-" "-"
+ connect_ns ${NSA} ${NSA_DEV2} "-" "-" \
+ ${NSC} ${NSC_DEV} "-" "-"
+
+ NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV})
+ NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV})
+ NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV})
+
+ create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-"
+ ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF}
+ ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF}
+
+ set +e
+
+ sleep 1
+}
+
################################################################################
# IPv4
setup_cmd_nsb ip li del vlan100 2>/dev/null
}
+# VRF only.
+# ns-A device is connected to both ns-B and ns-C on a single VRF but only has
+# LLA on the interfaces
+use_case_ping_lla_multi()
+{
+ setup_lla_only
+ # only want reply from ns-A
+ setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+ setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+
+ log_start
+ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B"
+
+ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C"
+
+ # cycle/flap the first ns-A interface
+ setup_cmd ip link set ${NSA_DEV} down
+ setup_cmd ip link set ${NSA_DEV} up
+ sleep 1
+
+ log_start
+ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B"
+ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C"
+
+ # cycle/flap the second ns-A interface
+ setup_cmd ip link set ${NSA_DEV2} down
+ setup_cmd ip link set ${NSA_DEV2} up
+ sleep 1
+
+ log_start
+ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B"
+ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C"
+}
+
use_cases()
{
log_section "Use cases"
+ log_subsection "Device enslaved to bridge"
use_case_br
+ log_subsection "Ping LLA with multiple interfaces"
+ use_case_ping_lla_multi
}
################################################################################
interrupted = true;
break;
}
+
+ /* no events and more time to wait; poll again */
+ continue;
}
if (pfd.revents != POLLIN)
error(1, errno, "poll: 0x%x expected 0x%x\n",