unsigned long flags;
int ret;
- /* If failover is pending don't schedule any other reset.
+ /*
+ * If failover is pending don't schedule any other reset.
* Instead let the failover complete. If there is already
* a failover reset scheduled, we will detect and drop the
* duplicate reset when walking the ->rwi_list below.
*/
- spin_lock_irqsave(&adapter->rwi_lock, flags);
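/* Sketch of the dedup walk the comment above refers to (variable and
 * label names illustrative, not from this hunk):
 *
 *	list_for_each_entry(tmp, &adapter->rwi_list, list)
 *		if (tmp->reset_reason == reason)
 *			goto out;	// duplicate reset, drop it
 */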
struct tasklet_struct tasklet;
enum vnic_state state;
+ /* Used for serialization of state field */
+ spinlock_t state_lock;
enum ibmvnic_reset_reason reset_reason;
struct list_head rwi_list;
+ /* Used for serialization of rwi_list. When taking both the state and
+ * rwi locks, take the state lock first.
+ */
+ spinlock_t rwi_lock;
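+ /* Ordering sketch implied by the comment above (illustrative only,
+ * not part of the patch):
+ *
+ *	spin_lock_irqsave(&adapter->state_lock, flags);
+ *	spin_lock(&adapter->rwi_lock);
+ *	...
+ *	spin_unlock(&adapter->rwi_lock);
+ *	spin_unlock_irqrestore(&adapter->state_lock, flags);
+ */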
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool sf_dev_allocated;
+
+ sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+ if (sf_dev_allocated) {
+ /* Reload results in deleting SF device which further results in
+ * unregistering devlink instance while holding devlink_mutex.
+ * Hence, do not support reload.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
+ return -EOPNOTSUPP;
+ }
+ if (mlx5_lag_is_active(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+ return -EOPNOTSUPP;
+ }
+
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
mlx5_unload_one(dev, false);
}
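/* For orientation (a sketch, not part of this patch): reload_down is
 * reached through the driver's devlink_ops table, roughly:
 *
 *	static const struct devlink_ops mlx5_devlink_ops = {
 *		.reload_actions	= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
 *		.reload_down	= mlx5_devlink_reload_down,
 *		.reload_up	= mlx5_devlink_reload_up,
 *	};
 */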
/* Use the same counter as the reverse direction */
- mutex_lock(&ct_priv->shared_counter_lock);
- rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
- tuples_ht_params);
- if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->counter;
- }
+ spin_lock_bh(&ct_priv->ht_lock);
+ rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+
+ if (IS_ERR(rev_entry)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ goto create_counter;
}
- mutex_unlock(&ct_priv->shared_counter_lock);
+
+ if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ shared_counter = rev_entry->counter;
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_entry_put(rev_entry);
+ return shared_counter;
+ }
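+ /* refcount_inc_not_zero() above is the usual "take a reference only
+ * if the object is still live" idiom: if the reverse entry's counter
+ * is concurrently dropping to zero, the increment fails and we fall
+ * through to create_counter below and allocate a fresh counter.
+ */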
+
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ create_counter:
shared_counter = mlx5_tc_ct_counter_create(ct_priv);
- if (IS_ERR(shared_counter)) {
- ret = PTR_ERR(shared_counter);
- return ERR_PTR(ret);
- }
+ if (IS_ERR(shared_counter))
+ return shared_counter;
shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "qos.h"
+#include "en/trap.h"
+ #include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
u32 buf_size = 0;
int i;
++<<<<<<< HEAD
+ if (MLX5_IPSEC_DEV(mdev))
++=======
+ #ifdef CONFIG_MLX5_EN_IPSEC
+ if (mlx5_fpga_is_ipsec_device(mdev))
++>>>>>>> 3af409ca278d4a8d50e91f9f7c4c33b175645cf3
byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev)) {
- netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
+ if (mlx5_fpga_is_ipsec_device(mdev)) {
+ netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
-#endif
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
- rtl_wol_suspend_quirk(tp);
- return;
+ rtl_wol_enable_rx(tp);
}
+
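+ /* Editorial note: clearing PMCH bit 7 below is the PLL power-down
+ * itself (the removed rtl_pll_power_up counterpart set the same bit);
+ * versions 40/41/49 also clear bits in ERI register 0x1a8 first.
+ */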
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+ break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_49:
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+ break;
+ default:
+ break;
+ }
}
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- break;
- default:
- break;
- }
-
- phy_resume(tp->phydev);
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
-
+ TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+ __TCA_FLOWER_KEY_CT_FLAGS_MAX,
};
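/* Userspace sketch (assumes matching iproute2 support): the two new bits
 * back the "inv" and "rpl" ct_state keywords, e.g.:
 *
 *	tc filter add dev eth0 ingress proto ip flower \
 *		ct_state +trk+est-inv action drop
 */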
enum {
subflow_req->mp_join = 0;
subflow_req->msk = NULL;
mptcp_token_init_request(req);
-
- #ifdef CONFIG_TCP_MD5SIG
- /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
- * TCP option space.
- */
- if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
- return -EINVAL;
- #endif
-
- return 0;
}
+static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
+{
+ return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
+}
+
/* Init mptcp request socket.
*
* Returns an error code if a JOIN has failed and a TCP reset
static void subflow_write_space(struct sock *ssk)
{
- /* we take action in __mptcp_clean_una() */
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+ mptcp_propagate_sndbuf(sk, ssk);
+ mptcp_write_space(sk);
}
+ void __mptcp_error_report(struct sock *sk)
+ {
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int err = sock_error(ssk);
+
+ if (!err)
+ continue;
+
+ /* only propagate errors on fallen-back sockets or
+ * on MPC connect
+ */
+ if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+ continue;
+
+ inet_sk_state_store(sk, inet_sk_state_load(ssk));
+ sk->sk_err = -err;
+
+ /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+ smp_wmb();
+ sk->sk_error_report(sk);
+ break;
+ }
+ }
+
+ static void subflow_error_report(struct sock *ssk)
+ {
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_error_report(sk);
+ else
+ set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+ }
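+
+ /* Note on the pattern above: when the msk socket lock is owned by a
+ * process context, the error is only flagged via MPTCP_ERROR_REPORT
+ * and is expected to be replayed once the lock is released (the same
+ * deferral scheme TCP uses for its backlog).
+ */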
+
static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
- match_ip_tos_test match_indev_test match_ip_ttl_test"
+ match_ip_tos_test match_indev_test match_ip_ttl_test match_mpls_label_test \
+ match_mpls_tc_test match_mpls_bos_test match_mpls_ttl_test \
+ match_mpls_lse_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
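
# Usage sketch (assuming the forwarding lib.sh conventions): interfaces can
# be given on the command line or via forwarding.config, and TESTS narrows
# the run, e.g.:
#   TESTS="match_mpls_label_test" ./tc_flower.sh veth0 veth1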