// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gso.h>
#include <net/gro.h>

/**
 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
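/* Illustrative sketch (not part of this file): the offload_base list that
 * both lookups above walk is populated by protocols registering a
 * struct packet_offload via dev_add_offload().  A registration modelled
 * on IPv4's might look roughly like this; the names "foo_packet_offload"
 * and "foo_gso_segment" are hypothetical placeholders:
 *
 *	static struct packet_offload foo_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = foo_gso_segment,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_packet_offload);
 *		return 0;
 *	}
 *
 * Once registered, skb_eth_gso_segment(skb, features, htons(ETH_P_IP))
 * would dispatch to foo_gso_segment().
 */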
/* openvswitch calls this on rx path, so we need a different check.
 */
static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation.  This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
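/* Illustrative sketch (not part of this file): on the transmit path this
 * function is normally reached through the skb_gso_segment() wrapper,
 * which passes tx_path == true, and the caller then takes over the
 * returned segment list.  A caller might consume it roughly like this
 * (error handling trimmed; the "drop" label is hypothetical):
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);	// original skb has been replaced
 *		skb = segs;		// segments linked via skb->next
 *	}
 *
 * A NULL return means the skb did not need to be segmented and can be
 * transmitted as-is.
 */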
/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * -    L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
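/* Illustrative sketch (not part of this file): forwarding paths use
 * skb_gso_validate_network_len() to decide whether a GSO skb can cross a
 * route with a smaller MTU without being software-segmented first.  A
 * simplified check in the spirit of IPv4 forwarding's MTU test might look
 * like this ("exceeds_mtu" is a hypothetical helper name):
 *
 *	static bool exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 *	{
 *		if (skb->len <= mtu)
 *			return false;
 *
 *		// A GSO skb is acceptable as long as no resulting segment
 *		// (L3 headers + L4 headers + payload) exceeds the MTU.
 *		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 *			return false;
 *
 *		return true;
 *	}
 *
 * skb_gso_validate_mac_len() is the analogous check against an L2 length
 * budget, e.g. when a qdisc or driver imposes a hard limit on frame size.
 */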