diff --git a/net/core/dev.c b/net/core/dev.c index 2acfa44..02e304b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -176,6 +176,10 @@ static int call_netdevice_notifiers_extack(unsigned long val, struct net_device *dev, struct netlink_ext_ack *extack); +#if defined(CONFIG_CPE_FAST_PATH) +static fp_iface_stats_get fast_path_stats_get; +#endif + static DEFINE_MUTEX(ifalias_mutex); /* protects napi_hash addition/deletion and napi_gen_id */ @@ -4002,9 +4006,15 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device skb = segs; } } else { - if (skb_needs_linearize(skb, features) && - __skb_linearize(skb)) - goto out_kfree_skb; + /* Linearize only if IPsec policy is not selected. */ +#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD) + if (!skb->ipsec_offload) +#endif + { + if (skb_needs_linearize(skb, features) && + __skb_linearize(skb)) + goto out_kfree_skb; + } /* If packet is not checksummed and device does not * support checksumming for this protocol, complete @@ -4792,6 +4802,40 @@ out: } EXPORT_SYMBOL(__dev_queue_xmit); +#if defined(CONFIG_CPE_FAST_PATH) +/* WiFi IPsec offload hook - allows cdx to intercept packets for IPsec + * processing when the packet is transmitted on a wifi interface. + */ +dpaa_wifi_xmit_local_hook_t dpaa_wifi_xmit_local_ipsec_handler; +EXPORT_SYMBOL(dpaa_wifi_xmit_local_ipsec_handler); + +/* Register a hook function for IPsec offload on wifi interfaces. */ +int dpa_register_wifi_xmit_local_hook(dpaa_wifi_xmit_local_hook_t hookfn) +{ + if (dpaa_wifi_xmit_local_ipsec_handler) { + pr_warn("%s: hook already registered\n", __func__); + return -1; + } + dpaa_wifi_xmit_local_ipsec_handler = hookfn; + return 0; +} +EXPORT_SYMBOL(dpa_register_wifi_xmit_local_hook); + +/* Unregister the IPsec offload hook. 
+ */ +void dpa_unregister_wifi_xmit_local_hook(void) +{ + dpaa_wifi_xmit_local_ipsec_handler = NULL; +} +EXPORT_SYMBOL(dpa_unregister_wifi_xmit_local_hook); + +/* Original dev_queue_xmit - called when wifi hook is not applicable. */ +int original_dev_queue_xmit(struct sk_buff *skb) +{ + return __dev_queue_xmit(skb, NULL); +} +EXPORT_SYMBOL(original_dev_queue_xmit); +#endif + int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) { struct net_device *dev = skb->dev; @@ -5862,6 +5906,15 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, trace_netif_receive_skb(skb); +#ifdef CONFIG_CPE_FAST_PATH + /* ifindex of device we arrived on, now skb->skb_iif + * always tracks skb->dev. + */ + if (!skb->iif_index) + skb->iif_index = skb->dev->ifindex; + if (!skb->underlying_iif) + skb->underlying_iif = skb->dev->ifindex; +#endif orig_dev = skb->dev; skb_reset_network_header(skb); @@ -7627,9 +7680,9 @@ static int __napi_poll(struct napi_struct *n, bool *repoll) return work; } /* Flush too old packets. If HZ < 1000, flush all packets */ - gro_flush_normal(&n->gro, HZ >= 1000); + gro_flush(&n->gro, HZ >= 1000); /* Some drivers may have called napi_schedule * prior to exhausting their budget. */ @@ -11762,10 +11812,28 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); } } +#if defined(CONFIG_CPE_FAST_PATH) + if (fast_path_stats_get) + fast_path_stats_get(dev, storage); +#endif return storage; } EXPORT_SYMBOL(dev_get_stats); +#if defined(CONFIG_CPE_FAST_PATH) +void dev_fp_stats_get_register(fp_iface_stats_get func) +{ + fast_path_stats_get = func; +} +EXPORT_SYMBOL(dev_fp_stats_get_register); + +void dev_fp_stats_get_deregister(void) +{ + fast_path_stats_get = NULL; +} +EXPORT_SYMBOL(dev_fp_stats_get_deregister); +#endif + /** * dev_fetch_sw_netstats - get per-cpu network device statistics * @s: place to store stats