...

Source file src/k8s.io/kubernetes/pkg/proxy/nftables/proxier_test.go

Documentation: k8s.io/kubernetes/pkg/proxy/nftables

     1  //go:build linux
     2  // +build linux
     3  
     4  /*
     5  Copyright 2015 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package nftables
    21  
    22  import (
    23  	"fmt"
    24  	"net"
    25  	"reflect"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/lithammer/dedent"
    30  	"github.com/stretchr/testify/assert"
    31  
    32  	v1 "k8s.io/api/core/v1"
    33  	discovery "k8s.io/api/discovery/v1"
    34  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    35  	"k8s.io/apimachinery/pkg/types"
    36  	"k8s.io/apimachinery/pkg/util/intstr"
    37  	"k8s.io/apimachinery/pkg/util/sets"
    38  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    39  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    40  	"k8s.io/component-base/metrics/testutil"
    41  	"k8s.io/kubernetes/pkg/features"
    42  	"k8s.io/kubernetes/pkg/proxy"
    43  	"k8s.io/kubernetes/pkg/proxy/conntrack"
    44  	"k8s.io/kubernetes/pkg/proxy/healthcheck"
    45  	"k8s.io/kubernetes/pkg/proxy/metrics"
    46  	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
    47  	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
    48  	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
    49  	"k8s.io/kubernetes/pkg/util/async"
    50  	netutils "k8s.io/utils/net"
    51  	"k8s.io/utils/ptr"
    52  	"sigs.k8s.io/knftables"
    53  )
    54  
    55  // Conventions for tests using NewFakeProxier:
    56  //
    57  // Pod IPs:             10.0.0.0/8
    58  // Service ClusterIPs:  172.30.0.0/16
    59  // Node IPs:            192.168.0.0/24
    60  // Local Node IP:       192.168.0.2
    61  // Service ExternalIPs: 192.168.99.0/24
    62  // LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
    63  // Non-cluster IPs:     203.0.113.0/24
    64  // LB Source Range:     203.0.113.0/25
    65  
    66  const testHostname = "test-hostname"
    67  const testNodeIP = "192.168.0.2"
    68  const testNodeIPAlt = "192.168.1.2"
    69  const testExternalIP = "192.168.99.11"
    70  const testNodeIPv6 = "2001:db8::1"
    71  const testNodeIPv6Alt = "2001:db8:1::2"
    72  const testExternalClient = "203.0.113.2"
    73  const testExternalClientBlocked = "203.0.113.130"
    74  
    75  var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt}
    76  
    77  func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
    78  	// TODO: Call NewProxier after refactoring out the goroutine
    79  	// invocation into a Run() method.
    80  	nftablesFamily := knftables.IPv4Family
    81  	podCIDR := "10.0.0.0/8"
    82  	serviceCIDRs := "172.30.0.0/16"
    83  	if ipFamily == v1.IPv6Protocol {
    84  		nftablesFamily = knftables.IPv6Family
    85  		podCIDR = "fd00:10::/64"
    86  		serviceCIDRs = "fd00:10:96::/112"
    87  	}
    88  	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR)
    89  
    90  	networkInterfacer := proxyutiltest.NewFakeNetwork()
    91  	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
    92  	addrs := []net.Addr{
    93  		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
    94  		&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
    95  	}
    96  	networkInterfacer.AddInterfaceAddr(&itf, addrs)
    97  	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
    98  	addrs1 := []net.Addr{
    99  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
   100  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)},
   101  		&net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)},
   102  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)},
   103  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)},
   104  	}
   105  	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)
   106  
   107  	nft := knftables.NewFake(nftablesFamily, kubeProxyTable)
   108  
   109  	var nodeIP net.IP
   110  	if ipFamily == v1.IPv4Protocol {
   111  		nodeIP = netutils.ParseIPSloppy(testNodeIP)
   112  	} else {
   113  		nodeIP = netutils.ParseIPSloppy(testNodeIPv6)
   114  	}
   115  	p := &Proxier{
   116  		ipFamily:            ipFamily,
   117  		svcPortMap:          make(proxy.ServicePortMap),
   118  		serviceChanges:      proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, nil, nil),
   119  		endpointsMap:        make(proxy.EndpointsMap),
   120  		endpointsChanges:    proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipFamily, nil, nil),
   121  		nftables:            nft,
   122  		masqueradeMark:      "0x4000",
   123  		conntrack:           conntrack.NewFake(),
   124  		localDetector:       detectLocal,
   125  		hostname:            testHostname,
   126  		serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
   127  		nodeIP:              nodeIP,
   128  		nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nil, nodeIP),
   129  		networkInterfacer:   networkInterfacer,
   130  		staleChains:         make(map[string]time.Time),
   131  		serviceCIDRs:        serviceCIDRs,
   132  	}
   133  	p.setInitialized(true)
   134  	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
   135  
   136  	return nft, p
   137  }
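        
        // A minimal, hypothetical example of the pattern the tests below follow:
        // build a fake proxier, add a Service and its backing EndpointSlice, call
        // syncProxyRules, and then assert either on the generated rules (nft.Dump())
        // or on simulated packet flows. runPacketFlowTests and packetFlowTest are
        // helpers defined elsewhere in this test package (not shown in this
        // excerpt); the names and IPs used here are invented but follow the
        // conventions listed above NewFakeProxier.
        func TestSketchClusterIP(t *testing.T) {
        	nft, fp := NewFakeProxier(v1.IPv4Protocol)
        
        	// One ClusterIP Service with a single TCP port...
        	makeServiceMap(fp,
        		makeTestService("ns9", "svc9", func(svc *v1.Service) {
        			svc.Spec.ClusterIP = "172.30.0.99"
        			svc.Spec.Ports = []v1.ServicePort{{
        				Name:     "p80",
        				Port:     80,
        				Protocol: v1.ProtocolTCP,
        			}}
        		}),
        	)
        	// ...backed by one endpoint.
        	populateEndpointSlices(fp,
        		makeTestEndpointSlice("ns9", "svc9", 1, func(eps *discovery.EndpointSlice) {
        			eps.AddressType = discovery.AddressTypeIPv4
        			eps.Endpoints = []discovery.Endpoint{{Addresses: []string{"10.180.9.1"}}}
        			eps.Ports = []discovery.EndpointPort{{
        				Name:     ptr.To("p80"),
        				Port:     ptr.To[int32](80),
        				Protocol: ptr.To(v1.ProtocolTCP),
        			}}
        		}),
        	)
        
        	fp.syncProxyRules()
        
        	// A packet from a pod to the ClusterIP should be DNATted to the
        	// endpoint without masquerading.
        	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{{
        		name:     "pod to cluster IP",
        		sourceIP: "10.0.0.2",
        		destIP:   "172.30.0.99",
        		destPort: 80,
        		output:   "10.180.9.1:80",
        	}})
        }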
   138  
   139  // TestOverallNFTablesRules creates a variety of services and verifies that the generated
   140  // rules are exactly as expected.
   141  func TestOverallNFTablesRules(t *testing.T) {
   142  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
   143  	metrics.RegisterMetrics()
   144  
   145  	makeServiceMap(fp,
   146  		// create ClusterIP service
   147  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
   148  			svc.Spec.ClusterIP = "172.30.0.41"
   149  			svc.Spec.Ports = []v1.ServicePort{{
   150  				Name:     "p80",
   151  				Port:     80,
   152  				Protocol: v1.ProtocolTCP,
   153  			}}
   154  		}),
   155  		// create LoadBalancer service with Local traffic policy
   156  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
   157  			svc.Spec.Type = "LoadBalancer"
   158  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
   159  			svc.Spec.ClusterIP = "172.30.0.42"
   160  			svc.Spec.Ports = []v1.ServicePort{{
   161  				Name:     "p80",
   162  				Port:     80,
   163  				Protocol: v1.ProtocolTCP,
   164  				NodePort: 3001,
   165  			}}
   166  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
   167  				IP: "1.2.3.4",
   168  			}}
   169  			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
   170  			svc.Spec.HealthCheckNodePort = 30000
   171  		}),
   172  		// create NodePort service
   173  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
   174  			svc.Spec.Type = "NodePort"
   175  			svc.Spec.ClusterIP = "172.30.0.43"
   176  			svc.Spec.Ports = []v1.ServicePort{{
   177  				Name:     "p80",
   178  				Port:     80,
   179  				Protocol: v1.ProtocolTCP,
   180  				NodePort: 3003,
   181  			}}
   182  		}),
   183  		// create ExternalIP service
   184  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
   185  			svc.Spec.Type = "NodePort"
   186  			svc.Spec.ClusterIP = "172.30.0.44"
   187  			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
   188  			svc.Spec.Ports = []v1.ServicePort{{
   189  				Name:       "p80",
   190  				Port:       80,
   191  				Protocol:   v1.ProtocolTCP,
   192  				TargetPort: intstr.FromInt32(80),
   193  			}}
   194  		}),
   195  		// create LoadBalancer service with Cluster traffic policy, source ranges,
   196  		// and session affinity
   197  		makeTestService("ns5", "svc5", func(svc *v1.Service) {
   198  			svc.Spec.Type = "LoadBalancer"
   199  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
   200  			svc.Spec.ClusterIP = "172.30.0.45"
   201  			svc.Spec.Ports = []v1.ServicePort{{
   202  				Name:     "p80",
   203  				Port:     80,
   204  				Protocol: v1.ProtocolTCP,
   205  				NodePort: 3002,
   206  			}}
   207  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
   208  				IP: "5.6.7.8",
   209  			}}
   210  			svc.Spec.HealthCheckNodePort = 30000
   211  			// Extra whitespace to ensure that an invalid value will not result
   212  			// in a crash, for backward compatibility.
   213  			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
   214  
   215  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
   216  			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
   217  				ClientIP: &v1.ClientIPConfig{
   218  					TimeoutSeconds: ptr.To[int32](10800),
   219  				},
   220  			}
   221  		}),
   222  		// create ClusterIP service with no endpoints
   223  		makeTestService("ns6", "svc6", func(svc *v1.Service) {
   224  			svc.Spec.Type = "ClusterIP"
   225  			svc.Spec.ClusterIP = "172.30.0.46"
   226  			svc.Spec.Ports = []v1.ServicePort{{
   227  				Name:       "p80",
   228  				Port:       80,
   229  				Protocol:   v1.ProtocolTCP,
   230  				TargetPort: intstr.FromInt32(80),
   231  			}}
   232  		}),
   233  	)
   234  	populateEndpointSlices(fp,
   235  		// create ClusterIP service endpoints
   236  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
   237  			eps.AddressType = discovery.AddressTypeIPv4
   238  			eps.Endpoints = []discovery.Endpoint{{
   239  				Addresses: []string{"10.180.0.1"},
   240  			}}
   241  			eps.Ports = []discovery.EndpointPort{{
   242  				Name:     ptr.To("p80"),
   243  				Port:     ptr.To[int32](80),
   244  				Protocol: ptr.To(v1.ProtocolTCP),
   245  			}}
   246  		}),
   247  		// create Local LoadBalancer endpoints. Note that since we aren't setting
   248  		// the endpoint's NodeName, it will be considered non-local and ignored.
   249  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
   250  			eps.AddressType = discovery.AddressTypeIPv4
   251  			eps.Endpoints = []discovery.Endpoint{{
   252  				Addresses: []string{"10.180.0.2"},
   253  			}}
   254  			eps.Ports = []discovery.EndpointPort{{
   255  				Name:     ptr.To("p80"),
   256  				Port:     ptr.To[int32](80),
   257  				Protocol: ptr.To(v1.ProtocolTCP),
   258  			}}
   259  		}),
   260  		// create NodePort service endpoints
   261  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
   262  			eps.AddressType = discovery.AddressTypeIPv4
   263  			eps.Endpoints = []discovery.Endpoint{{
   264  				Addresses: []string{"10.180.0.3"},
   265  			}}
   266  			eps.Ports = []discovery.EndpointPort{{
   267  				Name:     ptr.To("p80"),
   268  				Port:     ptr.To[int32](80),
   269  				Protocol: ptr.To(v1.ProtocolTCP),
   270  			}}
   271  		}),
   272  		// create ExternalIP service endpoints
   273  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
   274  			eps.AddressType = discovery.AddressTypeIPv4
   275  			eps.Endpoints = []discovery.Endpoint{{
   276  				Addresses: []string{"10.180.0.4"},
   277  			}, {
   278  				Addresses: []string{"10.180.0.5"},
   279  				NodeName:  ptr.To(testHostname),
   280  			}}
   281  			eps.Ports = []discovery.EndpointPort{{
   282  				Name:     ptr.To("p80"),
   283  				Port:     ptr.To[int32](80),
   284  				Protocol: ptr.To(v1.ProtocolTCP),
   285  			}}
   286  		}),
   287  		// create Cluster LoadBalancer endpoints
   288  		makeTestEndpointSlice("ns5", "svc5", 1, func(eps *discovery.EndpointSlice) {
   289  			eps.AddressType = discovery.AddressTypeIPv4
   290  			eps.Endpoints = []discovery.Endpoint{{
   291  				Addresses: []string{"10.180.0.3"},
   292  			}}
   293  			eps.Ports = []discovery.EndpointPort{{
   294  				Name:     ptr.To("p80"),
   295  				Port:     ptr.To[int32](80),
   296  				Protocol: ptr.To(v1.ProtocolTCP),
   297  			}}
   298  		}),
   299  	)
   300  
   301  	fp.syncProxyRules()
   302  
   303  	expected := dedent.Dedent(`
   304  		add table ip kube-proxy { comment "rules for kube-proxy" ; }
   305  
   306  		add chain ip kube-proxy mark-for-masquerade
   307  		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
   308  		add chain ip kube-proxy masquerading
   309  		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
   310  		add rule ip kube-proxy masquerading mark set mark xor 0x4000
   311  		add rule ip kube-proxy masquerading masquerade fully-random
   312  		add chain ip kube-proxy services
   313  		add chain ip kube-proxy service-endpoints-check
   314  		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
   315  		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
   316  		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
   317  		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
   318  		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
   319  		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
   320  		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
   321  		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
   322  		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
   323  		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
   324  		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
   325  		add rule ip kube-proxy filter-output ct state new jump firewall-check
   326  		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
   327  		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
   328  		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
   329  		add rule ip kube-proxy nat-output jump services
   330  		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
   331  		add rule ip kube-proxy nat-postrouting jump masquerading
   332  		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
   333  		add rule ip kube-proxy nat-prerouting jump services
   334  		add chain ip kube-proxy nodeport-endpoints-check
   335  		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
   336  
   337  		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
   338  		add chain ip kube-proxy cluster-ips-check
   339  		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
   340  		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
   341  
   342  		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
   343  		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
   344  		add chain ip kube-proxy firewall-check
   345  		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
   346  
   347  		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
   348  		add rule ip kube-proxy reject-chain reject
   349  
   350  		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
   351  		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
   352  
   353  		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
   354  		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
   355  		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
   356  		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
   357  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
   358  
   359  		# svc1
   360  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
   361  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
   362  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
   363  
   364  		add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
   365  		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
   366  		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
   367  
   368  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
   369  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
   370  
   371  		# svc2
   372  		add chain ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80
   373  		add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 ip daddr 172.30.0.42 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
   374  		add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 }
   375  		add chain ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80
   376  		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 ip saddr 10.0.0.0/8 goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit pod traffic"
   377  		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local jump mark-for-masquerade comment "masquerade local traffic"
   378  		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit local traffic"
   379  		add chain ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80
   380  		add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 ip saddr 10.180.0.2 jump mark-for-masquerade
   381  		add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 meta l4proto tcp dnat to 10.180.0.2:80
   382  
   383  		add element ip kube-proxy cluster-ips { 172.30.0.42 }
   384  		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 80 : goto service-42NFTM6N-ns2/svc2/tcp/p80 }
   385  		add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
   386  		add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
   387  		add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
   388  
   389  		add element ip kube-proxy no-endpoint-nodeports { tcp . 3001 comment "ns2/svc2:p80" : drop }
   390  		add element ip kube-proxy no-endpoint-services { 1.2.3.4 . tcp . 80 comment "ns2/svc2:p80" : drop }
   391  		add element ip kube-proxy no-endpoint-services { 192.168.99.22 . tcp . 80 comment "ns2/svc2:p80" : drop }
   392  
   393  		# svc3
   394  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
   395  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
   396  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 }
   397  		add chain ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80
   398  		add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 jump mark-for-masquerade
   399  		add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
   400  		add chain ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80
   401  		add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
   402  		add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
   403  
   404  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
   405  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
   406  		add element ip kube-proxy service-nodeports { tcp . 3003 : goto external-4AT6LBPK-ns3/svc3/tcp/p80 }
   407  
   408  		# svc4
   409  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
   410  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
   411  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 , 1 : goto endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 }
   412  		add chain ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80
   413  		add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 jump mark-for-masquerade
   414  		add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
   415  		add chain ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80
   416  		add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 ip saddr 10.180.0.5 jump mark-for-masquerade
   417  		add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 meta l4proto tcp dnat to 10.180.0.5:80
   418  		add chain ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80
   419  		add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 ip saddr 10.180.0.4 jump mark-for-masquerade
   420  		add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 meta l4proto tcp dnat to 10.180.0.4:80
   421  
   422  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
   423  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
   424  		add element ip kube-proxy service-ips { 192.168.99.33 . tcp . 80 : goto external-LAUZTJTB-ns4/svc4/tcp/p80 }
   425  
   426  		# svc5
   427  		add set ip kube-proxy affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { type ipv4_addr ; flags dynamic,timeout ; timeout 10800s ; }
   428  		add chain ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80
   429  		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip daddr 172.30.0.45 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
   430  		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
   431  		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 }
   432  		add chain ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80
   433  		add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 jump mark-for-masquerade
   434  		add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 goto service-HVFWP5L3-ns5/svc5/tcp/p80
   435  
   436  		add chain ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
   437  		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
   438  		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 update @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { ip saddr }
   439  		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
   440  
   441  		add chain ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80
   442  		add rule ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr != { 203.0.113.0/25 } drop
   443  
   444  		add element ip kube-proxy cluster-ips { 172.30.0.45 }
   445  		add element ip kube-proxy service-ips { 172.30.0.45 . tcp . 80 : goto service-HVFWP5L3-ns5/svc5/tcp/p80 }
   446  		add element ip kube-proxy service-ips { 5.6.7.8 . tcp . 80 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
   447  		add element ip kube-proxy service-nodeports { tcp . 3002 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
   448  		add element ip kube-proxy firewall-ips { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" : goto firewall-HVFWP5L3-ns5/svc5/tcp/p80 }
   449  
   450  		# svc6
   451  		add element ip kube-proxy cluster-ips { 172.30.0.46 }
   452  		add element ip kube-proxy no-endpoint-services { 172.30.0.46 . tcp . 80 comment "ns6/svc6:p80" : goto reject-chain }
   453  		`)
   454  
   455  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
   456  }
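        
        // The remaining tests mostly assert on behavior rather than on the exact
        // rule text: runPacketFlowTests (a helper defined elsewhere in this test
        // package, not shown in this excerpt) checks, for a packet with the given
        // sourceIP/destIP/destPort and optional protocol, what result the dumped
        // ruleset produces: an endpoint such as "10.180.0.1:80", "REJECT", "DROP",
        // or "" for no match, and whether the packet is expected to be
        // masqueraded (masq).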
   457  
   458  // TestNoEndpointsReject tests that a service with no endpoints rejects connections to
   459  // its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP.
   460  func TestNoEndpointsReject(t *testing.T) {
   461  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
   462  	svcIP := "172.30.0.41"
   463  	svcPort := 80
   464  	svcNodePort := 3001
   465  	svcExternalIPs := "192.168.99.11"
   466  	svcLBIP := "1.2.3.4"
   467  	svcPortName := proxy.ServicePortName{
   468  		NamespacedName: makeNSN("ns1", "svc1"),
   469  		Port:           "p80",
   470  	}
   471  
   472  	makeServiceMap(fp,
   473  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
   474  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
   475  			svc.Spec.ClusterIP = svcIP
   476  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
   477  			svc.Spec.Ports = []v1.ServicePort{{
   478  				Name:     svcPortName.Port,
   479  				Protocol: v1.ProtocolTCP,
   480  				Port:     int32(svcPort),
   481  				NodePort: int32(svcNodePort),
   482  			}}
   483  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
   484  				IP: svcLBIP,
   485  			}}
   486  		}),
   487  	)
   488  	fp.syncProxyRules()
   489  
   490  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
   491  		{
   492  			name:     "pod to cluster IP with no endpoints",
   493  			sourceIP: "10.0.0.2",
   494  			destIP:   svcIP,
   495  			destPort: svcPort,
   496  			output:   "REJECT",
   497  		},
   498  		{
   499  			name:     "external to external IP with no endpoints",
   500  			sourceIP: testExternalClient,
   501  			destIP:   svcExternalIPs,
   502  			destPort: svcPort,
   503  			output:   "REJECT",
   504  		},
   505  		{
   506  			name:     "pod to NodePort with no endpoints",
   507  			sourceIP: "10.0.0.2",
   508  			destIP:   testNodeIP,
   509  			destPort: svcNodePort,
   510  			output:   "REJECT",
   511  		},
   512  		{
   513  			name:     "external to NodePort with no endpoints",
   514  			sourceIP: testExternalClient,
   515  			destIP:   testNodeIP,
   516  			destPort: svcNodePort,
   517  			output:   "REJECT",
   518  		},
   519  		{
   520  			name:     "pod to LoadBalancer IP with no endpoints",
   521  			sourceIP: "10.0.0.2",
   522  			destIP:   svcLBIP,
   523  			destPort: svcPort,
   524  			output:   "REJECT",
   525  		},
   526  		{
   527  			name:     "external to LoadBalancer IP with no endpoints",
   528  			sourceIP: testExternalClient,
   529  			destIP:   svcLBIP,
   530  			destPort: svcPort,
   531  			output:   "REJECT",
   532  		},
   533  	})
   534  }
   535  
   536  // TestClusterIPGeneral tests various basic features of a ClusterIP service
   537  func TestClusterIPGeneral(t *testing.T) {
   538  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
   539  
   540  	makeServiceMap(fp,
   541  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
   542  			svc.Spec.ClusterIP = "172.30.0.41"
   543  			svc.Spec.Ports = []v1.ServicePort{{
   544  				Name:     "http",
   545  				Port:     80,
   546  				Protocol: v1.ProtocolTCP,
   547  			}}
   548  		}),
   549  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
   550  			svc.Spec.ClusterIP = "172.30.0.42"
   551  			svc.Spec.Ports = []v1.ServicePort{
   552  				{
   553  					Name:     "http",
   554  					Port:     80,
   555  					Protocol: v1.ProtocolTCP,
   556  				},
   557  				{
   558  					Name:       "https",
   559  					Port:       443,
   560  					Protocol:   v1.ProtocolTCP,
   561  					TargetPort: intstr.FromInt32(8443),
   562  				},
   563  				{
   564  					Name:     "dns-udp",
   565  					Port:     53,
   566  					Protocol: v1.ProtocolUDP,
   567  				},
   568  				{
   569  					Name:     "dns-tcp",
   570  					Port:     53,
   571  					Protocol: v1.ProtocolTCP,
   572  					// We use TargetPort on TCP but not UDP/SCTP to
   573  					// help disambiguate the output.
   574  					TargetPort: intstr.FromInt32(5353),
   575  				},
   576  				{
   577  					Name:     "dns-sctp",
   578  					Port:     53,
   579  					Protocol: v1.ProtocolSCTP,
   580  				},
   581  			}
   582  		}),
   583  	)
   584  
   585  	populateEndpointSlices(fp,
   586  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
   587  			eps.AddressType = discovery.AddressTypeIPv4
   588  			eps.Endpoints = []discovery.Endpoint{{
   589  				Addresses: []string{"10.180.0.1"},
   590  				NodeName:  ptr.To(testHostname),
   591  			}}
   592  			eps.Ports = []discovery.EndpointPort{{
   593  				Name:     ptr.To("http"),
   594  				Port:     ptr.To[int32](80),
   595  				Protocol: ptr.To(v1.ProtocolTCP),
   596  			}}
   597  		}),
   598  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
   599  			eps.AddressType = discovery.AddressTypeIPv4
   600  			eps.Endpoints = []discovery.Endpoint{
   601  				{
   602  					Addresses: []string{"10.180.0.1"},
   603  					NodeName:  ptr.To(testHostname),
   604  				},
   605  				{
   606  					Addresses: []string{"10.180.2.1"},
   607  					NodeName:  ptr.To("host2"),
   608  				},
   609  			}
   610  			eps.Ports = []discovery.EndpointPort{
   611  				{
   612  					Name:     ptr.To("http"),
   613  					Port:     ptr.To[int32](80),
   614  					Protocol: ptr.To(v1.ProtocolTCP),
   615  				},
   616  				{
   617  					Name:     ptr.To("https"),
   618  					Port:     ptr.To[int32](8443),
   619  					Protocol: ptr.To(v1.ProtocolTCP),
   620  				},
   621  				{
   622  					Name:     ptr.To("dns-udp"),
   623  					Port:     ptr.To[int32](53),
   624  					Protocol: ptr.To(v1.ProtocolUDP),
   625  				},
   626  				{
   627  					Name:     ptr.To("dns-tcp"),
   628  					Port:     ptr.To[int32](5353),
   629  					Protocol: ptr.To(v1.ProtocolTCP),
   630  				},
   631  				{
   632  					Name:     ptr.To("dns-sctp"),
   633  					Port:     ptr.To[int32](53),
   634  					Protocol: ptr.To(v1.ProtocolSCTP),
   635  				},
   636  			}
   637  		}),
   638  	)
   639  
   640  	fp.syncProxyRules()
   641  
   642  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
   643  		{
   644  			name:     "simple clusterIP",
   645  			sourceIP: "10.180.0.2",
   646  			destIP:   "172.30.0.41",
   647  			destPort: 80,
   648  			output:   "10.180.0.1:80",
   649  			masq:     false,
   650  		},
   651  		{
   652  			name:     "hairpin to cluster IP",
   653  			sourceIP: "10.180.0.1",
   654  			destIP:   "172.30.0.41",
   655  			destPort: 80,
   656  			output:   "10.180.0.1:80",
   657  			masq:     true,
   658  		},
   659  		{
   660  			name:     "clusterIP with multiple endpoints",
   661  			sourceIP: "10.180.0.2",
   662  			destIP:   "172.30.0.42",
   663  			destPort: 80,
   664  			output:   "10.180.0.1:80, 10.180.2.1:80",
   665  			masq:     false,
   666  		},
   667  		{
   668  			name:     "clusterIP with TargetPort",
   669  			sourceIP: "10.180.0.2",
   670  			destIP:   "172.30.0.42",
   671  			destPort: 443,
   672  			output:   "10.180.0.1:8443, 10.180.2.1:8443",
   673  			masq:     false,
   674  		},
   675  		{
   676  			name:     "clusterIP with TCP, UDP, and SCTP on same port (TCP)",
   677  			sourceIP: "10.180.0.2",
   678  			protocol: v1.ProtocolTCP,
   679  			destIP:   "172.30.0.42",
   680  			destPort: 53,
   681  			output:   "10.180.0.1:5353, 10.180.2.1:5353",
   682  			masq:     false,
   683  		},
   684  		{
   685  			name:     "clusterIP with TCP, UDP, and SCTP on same port (UDP)",
   686  			sourceIP: "10.180.0.2",
   687  			protocol: v1.ProtocolUDP,
   688  			destIP:   "172.30.0.42",
   689  			destPort: 53,
   690  			output:   "10.180.0.1:53, 10.180.2.1:53",
   691  			masq:     false,
   692  		},
   693  		{
   694  			name:     "clusterIP with TCP, UDP, and SCTP on same port (SCTP)",
   695  			sourceIP: "10.180.0.2",
   696  			protocol: v1.ProtocolSCTP,
   697  			destIP:   "172.30.0.42",
   698  			destPort: 53,
   699  			output:   "10.180.0.1:53, 10.180.2.1:53",
   700  			masq:     false,
   701  		},
   702  		{
   703  			name:     "TCP-only port does not match UDP traffic",
   704  			sourceIP: "10.180.0.2",
   705  			protocol: v1.ProtocolUDP,
   706  			destIP:   "172.30.0.42",
   707  			destPort: 80,
   708  			output:   "REJECT",
   709  		},
   710  		{
   711  			name:     "svc1 does not accept svc2's ports",
   712  			sourceIP: "10.180.0.2",
   713  			destIP:   "172.30.0.41",
   714  			destPort: 443,
   715  			output:   "REJECT",
   716  		},
   717  		{
   718  			name:     "packet to unallocated cluster ip",
   719  			sourceIP: "10.180.0.2",
   720  			destIP:   "172.30.0.50",
   721  			destPort: 80,
   722  			output:   "DROP",
   723  		},
   724  	})
   725  }
   726  
   727  func TestLoadBalancer(t *testing.T) {
   728  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
   729  	svcIP := "172.30.0.41"
   730  	svcPort := 80
   731  	svcNodePort := 3001
   732  	svcLBIP1 := "1.2.3.4"
   733  	svcLBIP2 := "5.6.7.8"
   734  	svcPortName := proxy.ServicePortName{
   735  		NamespacedName: makeNSN("ns1", "svc1"),
   736  		Port:           "p80",
   737  		Protocol:       v1.ProtocolTCP,
   738  	}
   739  
   740  	makeServiceMap(fp,
   741  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
   742  			svc.Spec.Type = "LoadBalancer"
   743  			svc.Spec.ClusterIP = svcIP
   744  			svc.Spec.Ports = []v1.ServicePort{{
   745  				Name:     svcPortName.Port,
   746  				Port:     int32(svcPort),
   747  				Protocol: v1.ProtocolTCP,
   748  				NodePort: int32(svcNodePort),
   749  			}}
   750  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
   751  				{IP: svcLBIP1},
   752  				{IP: svcLBIP2},
   753  			}
   754  			svc.Spec.LoadBalancerSourceRanges = []string{
   755  				"192.168.0.0/24",
   756  
   757  				// Regression test that excess whitespace gets ignored
   758  				" 203.0.113.0/25",
   759  			}
   760  		}),
   761  	)
   762  
   763  	epIP := "10.180.0.1"
   764  	populateEndpointSlices(fp,
   765  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
   766  			eps.AddressType = discovery.AddressTypeIPv4
   767  			eps.Endpoints = []discovery.Endpoint{{
   768  				Addresses: []string{epIP},
   769  			}}
   770  			eps.Ports = []discovery.EndpointPort{{
   771  				Name:     ptr.To(svcPortName.Port),
   772  				Port:     ptr.To(int32(svcPort)),
   773  				Protocol: ptr.To(v1.ProtocolTCP),
   774  			}}
   775  		}),
   776  	)
   777  
   778  	fp.syncProxyRules()
   779  
   780  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
   781  		{
   782  			name:     "pod to cluster IP",
   783  			sourceIP: "10.0.0.2",
   784  			destIP:   svcIP,
   785  			destPort: svcPort,
   786  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   787  			masq:     false,
   788  		},
   789  		{
   790  			name:     "external to nodePort",
   791  			sourceIP: testExternalClient,
   792  			destIP:   testNodeIP,
   793  			destPort: svcNodePort,
   794  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   795  			masq:     true,
   796  		},
   797  		{
   798  			name:     "nodePort bypasses LoadBalancerSourceRanges",
   799  			sourceIP: testExternalClientBlocked,
   800  			destIP:   testNodeIP,
   801  			destPort: svcNodePort,
   802  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   803  			masq:     true,
   804  		},
   805  		{
   806  			name:     "accepted external to LB1",
   807  			sourceIP: testExternalClient,
   808  			destIP:   svcLBIP1,
   809  			destPort: svcPort,
   810  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   811  			masq:     true,
   812  		},
   813  		{
   814  			name:     "accepted external to LB2",
   815  			sourceIP: testExternalClient,
   816  			destIP:   svcLBIP2,
   817  			destPort: svcPort,
   818  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   819  			masq:     true,
   820  		},
   821  		{
   822  			name:     "blocked external to LB1",
   823  			sourceIP: testExternalClientBlocked,
   824  			destIP:   svcLBIP1,
   825  			destPort: svcPort,
   826  			output:   "DROP",
   827  		},
   828  		{
   829  			name:     "blocked external to LB2",
   830  			sourceIP: testExternalClientBlocked,
   831  			destIP:   svcLBIP2,
   832  			destPort: svcPort,
   833  			output:   "DROP",
   834  		},
   835  		{
   836  			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
   837  			sourceIP: "10.0.0.2",
   838  			destIP:   svcLBIP1,
   839  			destPort: svcPort,
   840  			output:   "DROP",
   841  		},
   842  		{
   843  			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
   844  			sourceIP: "10.0.0.2",
   845  			destIP:   svcLBIP2,
   846  			destPort: svcPort,
   847  			output:   "DROP",
   848  		},
   849  		{
   850  			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
   851  			sourceIP: testNodeIP,
   852  			destIP:   svcLBIP1,
   853  			destPort: svcPort,
   854  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   855  			masq:     true,
   856  		},
   857  		{
   858  			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
   859  			sourceIP: testNodeIP,
   860  			destIP:   svcLBIP2,
   861  			destPort: svcPort,
   862  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   863  			masq:     true,
   864  		},
   865  
   866  		// The LB rules assume that when you connect from a node to a LB IP,
   867  		// something external to kube-proxy will cause the connection to be
   868  		// SNATted to the LB IP, so if the LoadBalancerSourceRanges include the
   869  		// node IP, then we add a rule allowing traffic from the LB IP as well...
   870  		{
   871  			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
   872  			sourceIP: svcLBIP1,
   873  			destIP:   svcLBIP1,
   874  			destPort: svcPort,
   875  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   876  			masq:     true,
   877  		},
   878  		{
   879  			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
   880  			sourceIP: svcLBIP2,
   881  			destIP:   svcLBIP2,
   882  			destPort: svcPort,
   883  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
   884  			masq:     true,
   885  		},
   886  	})
   887  }
   888  
   889  // TestNodePorts tests NodePort services under various settings of the
   890  // --nodeport-addresses flag.
   891  func TestNodePorts(t *testing.T) {
   892  	testCases := []struct {
   893  		name string
   894  
   895  		family            v1.IPFamily
   896  		nodePortAddresses []string
   897  
   898  		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
   899  		// node IP to be accepted
   900  		allowAltNodeIP bool
   901  	}{
   902  		{
   903  			name: "ipv4",
   904  
   905  			family:            v1.IPv4Protocol,
   906  			nodePortAddresses: nil,
   907  
   908  			allowAltNodeIP: false,
   909  		},
   910  		{
   911  			name: "ipv4, multiple nodeport-addresses",
   912  
   913  			family:            v1.IPv4Protocol,
   914  			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
   915  
   916  			allowAltNodeIP: true,
   917  		},
   918  		{
   919  			name: "ipv6",
   920  
   921  			family:            v1.IPv6Protocol,
   922  			nodePortAddresses: nil,
   923  
   924  			allowAltNodeIP: false,
   925  		},
   926  		{
   927  			name: "ipv6, multiple nodeport-addresses",
   928  
   929  			family:            v1.IPv6Protocol,
   930  			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64", "2001:db8:1::2/128"},
   931  
   932  			allowAltNodeIP: true,
   933  		},
   934  	}
   935  
   936  	for _, tc := range testCases {
   937  		t.Run(tc.name, func(t *testing.T) {
   938  			nft, fp := NewFakeProxier(tc.family)
   939  
   940  			var svcIP, epIP1, epIP2 string
   941  			var nodeIP string
   942  			if tc.family == v1.IPv4Protocol {
   943  				svcIP = "172.30.0.41"
   944  				epIP1 = "10.180.0.1"
   945  				epIP2 = "10.180.2.1"
   946  				nodeIP = testNodeIP
   947  			} else {
   948  				svcIP = "fd00:172:30::41"
   949  				epIP1 = "fd00:10:180::1"
   950  				epIP2 = "fd00:10:180::2:1"
   951  				nodeIP = testNodeIPv6
   952  			}
   953  			if tc.nodePortAddresses != nil {
   954  				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, netutils.ParseIPSloppy(nodeIP))
   955  			}
   956  
   957  			makeServiceMap(fp,
   958  				makeTestService("ns1", "svc1", func(svc *v1.Service) {
   959  					svc.Spec.Type = v1.ServiceTypeNodePort
   960  					svc.Spec.ClusterIP = svcIP
   961  					svc.Spec.Ports = []v1.ServicePort{{
   962  						Name:     "p80",
   963  						Port:     80,
   964  						Protocol: v1.ProtocolTCP,
   965  						NodePort: 3001,
   966  					}}
   967  				}),
   968  			)
   969  
   970  			populateEndpointSlices(fp,
   971  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
   972  					if tc.family == v1.IPv4Protocol {
   973  						eps.AddressType = discovery.AddressTypeIPv4
   974  					} else {
   975  						eps.AddressType = discovery.AddressTypeIPv6
   976  					}
   977  					eps.Endpoints = []discovery.Endpoint{{
   978  						Addresses: []string{epIP1},
   979  						NodeName:  nil,
   980  					}, {
   981  						Addresses: []string{epIP2},
   982  						NodeName:  ptr.To(testHostname),
   983  					}}
   984  					eps.Ports = []discovery.EndpointPort{{
   985  						Name:     ptr.To("p80"),
   986  						Port:     ptr.To[int32](80),
   987  						Protocol: ptr.To(v1.ProtocolTCP),
   988  					}}
   989  				}),
   990  			)
   991  
   992  			fp.syncProxyRules()
   993  
   994  			var podIP, externalClientIP, altNodeIP string
   995  			if tc.family == v1.IPv4Protocol {
   996  				podIP = "10.0.0.2"
   997  				externalClientIP = testExternalClient
   998  				altNodeIP = testNodeIPAlt
   999  			} else {
  1000  				podIP = "fd00:10::2"
  1001  				externalClientIP = "2600:5200::1"
  1002  				altNodeIP = testNodeIPv6Alt
  1003  			}
  1004  			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
  1005  
  1006  			// Basic tests are the same for all cases
  1007  			runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1008  				{
  1009  					name:     "pod to cluster IP",
  1010  					sourceIP: podIP,
  1011  					destIP:   svcIP,
  1012  					destPort: 80,
  1013  					output:   output,
  1014  					masq:     false,
  1015  				},
  1016  				{
  1017  					name:     "external to nodePort",
  1018  					sourceIP: externalClientIP,
  1019  					destIP:   nodeIP,
  1020  					destPort: 3001,
  1021  					output:   output,
  1022  					masq:     true,
  1023  				},
  1024  				{
  1025  					name:     "node to nodePort",
  1026  					sourceIP: nodeIP,
  1027  					destIP:   nodeIP,
  1028  					destPort: 3001,
  1029  					output:   output,
  1030  					masq:     true,
  1031  				},
  1032  			})
  1033  
  1034  			if tc.allowAltNodeIP {
  1035  				runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1036  					{
  1037  						name:     "external to nodePort on secondary IP",
  1038  						sourceIP: externalClientIP,
  1039  						destIP:   altNodeIP,
  1040  						destPort: 3001,
  1041  						output:   output,
  1042  						masq:     true,
  1043  					},
  1044  				})
  1045  			} else {
  1046  				runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1047  					{
  1048  						name:     "secondary nodeIP ignores NodePorts",
  1049  						sourceIP: externalClientIP,
  1050  						destIP:   altNodeIP,
  1051  						destPort: 3001,
  1052  						output:   "",
  1053  					},
  1054  				})
  1055  			}
  1056  		})
  1057  	}
  1058  }
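        
        // Note that, as the expected dump in TestOverallNFTablesRules shows, when no
        // --nodeport-addresses are configured only the primary node IP is added to
        // the nodeport-ips set ("add element ip kube-proxy nodeport-ips { 192.168.0.2 }"),
        // which is why the "secondary nodeIP ignores NodePorts" cases above expect
        // no output, while explicitly listing the alternate ranges allows the
        // secondary node IP to accept NodePort traffic as well.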
  1059  
  1060  // TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs does not get
  1061  // masqueraded when using Local traffic policy. For traffic from external sources, that
  1062  // means it can also only be routed to local endpoints, but for traffic from internal
  1063  // sources, it gets routed to all endpoints.
  1064  func TestExternalTrafficPolicyLocal(t *testing.T) {
  1065  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
  1066  
  1067  	svcIP := "172.30.0.41"
  1068  	svcPort := 80
  1069  	svcNodePort := 3001
  1070  	svcHealthCheckNodePort := 30000
  1071  	svcExternalIPs := "192.168.99.11"
  1072  	svcLBIP := "1.2.3.4"
  1073  	svcPortName := proxy.ServicePortName{
  1074  		NamespacedName: makeNSN("ns1", "svc1"),
  1075  		Port:           "p80",
  1076  	}
  1077  
  1078  	makeServiceMap(fp,
  1079  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  1080  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1081  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1082  			svc.Spec.ClusterIP = svcIP
  1083  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  1084  			svc.Spec.Ports = []v1.ServicePort{{
  1085  				Name:       svcPortName.Port,
  1086  				Port:       int32(svcPort),
  1087  				Protocol:   v1.ProtocolTCP,
  1088  				NodePort:   int32(svcNodePort),
  1089  				TargetPort: intstr.FromInt32(int32(svcPort)),
  1090  			}}
  1091  			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  1092  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1093  				IP: svcLBIP,
  1094  			}}
  1095  		}),
  1096  	)
  1097  
  1098  	epIP1 := "10.180.0.1"
  1099  	epIP2 := "10.180.2.1"
  1100  	populateEndpointSlices(fp,
  1101  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  1102  			eps.AddressType = discovery.AddressTypeIPv4
  1103  			eps.Endpoints = []discovery.Endpoint{{
  1104  				Addresses: []string{epIP1},
  1105  			}, {
  1106  				Addresses: []string{epIP2},
  1107  				NodeName:  ptr.To(testHostname),
  1108  			}}
  1109  			eps.Ports = []discovery.EndpointPort{{
  1110  				Name:     ptr.To(svcPortName.Port),
  1111  				Port:     ptr.To(int32(svcPort)),
  1112  				Protocol: ptr.To(v1.ProtocolTCP),
  1113  			}}
  1114  		}),
  1115  	)
  1116  
  1117  	fp.syncProxyRules()
  1118  
  1119  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1120  		{
  1121  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  1122  			sourceIP: "10.0.0.2",
  1123  			destIP:   svcIP,
  1124  			destPort: svcPort,
  1125  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1126  			masq:     false,
  1127  		},
  1128  		{
  1129  			name:     "pod to external IP hits both endpoints, unmasqueraded",
  1130  			sourceIP: "10.0.0.2",
  1131  			destIP:   svcExternalIPs,
  1132  			destPort: svcPort,
  1133  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1134  			masq:     false,
  1135  		},
  1136  		{
  1137  			name:     "external to external IP hits only local endpoint, unmasqueraded",
  1138  			sourceIP: testExternalClient,
  1139  			destIP:   svcExternalIPs,
  1140  			destPort: svcPort,
  1141  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1142  			masq:     false,
  1143  		},
  1144  		{
  1145  			name:     "pod to LB IP hits both endpoints, unmasqueraded",
  1146  			sourceIP: "10.0.0.2",
  1147  			destIP:   svcLBIP,
  1148  			destPort: svcPort,
  1149  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1150  			masq:     false,
  1151  		},
  1152  		{
  1153  			name:     "external to LB IP hits only local endpoint, unmasqueraded",
  1154  			sourceIP: testExternalClient,
  1155  			destIP:   svcLBIP,
  1156  			destPort: svcPort,
  1157  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1158  			masq:     false,
  1159  		},
  1160  		{
  1161  			name:     "pod to NodePort hits both endpoints, unmasqueraded",
  1162  			sourceIP: "10.0.0.2",
  1163  			destIP:   testNodeIP,
  1164  			destPort: svcNodePort,
  1165  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1166  			masq:     false,
  1167  		},
  1168  		{
  1169  			name:     "external to NodePort hits only local endpoint, unmasqueraded",
  1170  			sourceIP: testExternalClient,
  1171  			destIP:   testNodeIP,
  1172  			destPort: svcNodePort,
  1173  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1174  			masq:     false,
  1175  		},
  1176  	})
  1177  }
  1178  
  1179  // TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets
  1180  // masqueraded when using Cluster traffic policy.
  1181  func TestExternalTrafficPolicyCluster(t *testing.T) {
  1182  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
  1183  
  1184  	svcIP := "172.30.0.41"
  1185  	svcPort := 80
  1186  	svcNodePort := 3001
  1187  	svcExternalIPs := "192.168.99.11"
  1188  	svcLBIP := "1.2.3.4"
  1189  	svcPortName := proxy.ServicePortName{
  1190  		NamespacedName: makeNSN("ns1", "svc1"),
  1191  		Port:           "p80",
  1192  	}
  1193  
  1194  	makeServiceMap(fp,
  1195  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  1196  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1197  			svc.Spec.ClusterIP = svcIP
  1198  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  1199  			svc.Spec.Ports = []v1.ServicePort{{
  1200  				Name:       svcPortName.Port,
  1201  				Port:       int32(svcPort),
  1202  				Protocol:   v1.ProtocolTCP,
  1203  				NodePort:   int32(svcNodePort),
  1204  				TargetPort: intstr.FromInt32(int32(svcPort)),
  1205  			}}
  1206  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1207  				IP: svcLBIP,
  1208  			}}
  1209  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  1210  		}),
  1211  	)
  1212  
  1213  	epIP1 := "10.180.0.1"
  1214  	epIP2 := "10.180.2.1"
  1215  	populateEndpointSlices(fp,
  1216  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  1217  			eps.AddressType = discovery.AddressTypeIPv4
  1218  			eps.Endpoints = []discovery.Endpoint{{
  1219  				Addresses: []string{epIP1},
  1220  				NodeName:  nil,
  1221  			}, {
  1222  				Addresses: []string{epIP2},
  1223  				NodeName:  ptr.To(testHostname),
  1224  			}}
  1225  			eps.Ports = []discovery.EndpointPort{{
  1226  				Name:     ptr.To(svcPortName.Port),
  1227  				Port:     ptr.To(int32(svcPort)),
  1228  				Protocol: ptr.To(v1.ProtocolTCP),
  1229  			}}
  1230  		}),
  1231  	)
  1232  
  1233  	fp.syncProxyRules()
  1234  
  1235  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1236  		{
  1237  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  1238  			sourceIP: "10.0.0.2",
  1239  			destIP:   svcIP,
  1240  			destPort: svcPort,
  1241  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1242  			masq:     false,
  1243  		},
  1244  		{
  1245  			name:     "pod to external IP hits both endpoints, masqueraded",
  1246  			sourceIP: "10.0.0.2",
  1247  			destIP:   svcExternalIPs,
  1248  			destPort: svcPort,
  1249  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1250  			masq:     true,
  1251  		},
  1252  		{
  1253  			name:     "external to external IP hits both endpoints, masqueraded",
  1254  			sourceIP: testExternalClient,
  1255  			destIP:   svcExternalIPs,
  1256  			destPort: svcPort,
  1257  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1258  			masq:     true,
  1259  		},
  1260  		{
  1261  			name:     "pod to LB IP hits both endpoints, masqueraded",
  1262  			sourceIP: "10.0.0.2",
  1263  			destIP:   svcLBIP,
  1264  			destPort: svcPort,
  1265  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1266  			masq:     true,
  1267  		},
  1268  		{
  1269  			name:     "external to LB IP hits both endpoints, masqueraded",
  1270  			sourceIP: testExternalClient,
  1271  			destIP:   svcLBIP,
  1272  			destPort: svcPort,
  1273  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1274  			masq:     true,
  1275  		},
  1276  		{
  1277  			name:     "pod to NodePort hits both endpoints, masqueraded",
  1278  			sourceIP: "10.0.0.2",
  1279  			destIP:   testNodeIP,
  1280  			destPort: svcNodePort,
  1281  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1282  			masq:     true,
  1283  		},
  1284  		{
  1285  			name:     "external to NodePort hits both endpoints, masqueraded",
  1286  			sourceIP: testExternalClient,
  1287  			destIP:   testNodeIP,
  1288  			destPort: svcNodePort,
  1289  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1290  			masq:     true,
  1291  		},
  1292  	})
  1293  }
  1294  
  1295  func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
  1296  	svc := &v1.Service{
  1297  		ObjectMeta: metav1.ObjectMeta{
  1298  			Name:        name,
  1299  			Namespace:   namespace,
  1300  			Annotations: map[string]string{},
  1301  		},
  1302  		Spec:   v1.ServiceSpec{},
  1303  		Status: v1.ServiceStatus{},
  1304  	}
  1305  	svcFunc(svc)
  1306  	return svc
  1307  }
  1308  
  1309  func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
  1310  	svcPort := v1.ServicePort{
  1311  		Name:       name,
  1312  		Protocol:   protocol,
  1313  		Port:       port,
  1314  		NodePort:   nodeport,
  1315  		TargetPort: intstr.FromInt32(int32(targetPort)),
  1316  	}
  1317  	return append(array, svcPort)
  1318  }
  1319  
  1320  func TestBuildServiceMapAddRemove(t *testing.T) {
  1321  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1322  
  1323  	services := []*v1.Service{
  1324  		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  1325  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1326  			svc.Spec.ClusterIP = "172.30.55.4"
  1327  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  1328  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  1329  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
  1330  		}),
  1331  		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
  1332  			svc.Spec.Type = v1.ServiceTypeNodePort
  1333  			svc.Spec.ClusterIP = "172.30.55.10"
  1334  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
  1335  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
  1336  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
  1337  		}),
  1338  		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
  1339  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1340  			svc.Spec.ClusterIP = "172.30.55.11"
  1341  			svc.Spec.LoadBalancerIP = "1.2.3.4"
  1342  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
  1343  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
  1344  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  1345  				Ingress: []v1.LoadBalancerIngress{
  1346  					{IP: "1.2.3.4"},
  1347  				},
  1348  			}
  1349  		}),
  1350  		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
  1351  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1352  			svc.Spec.ClusterIP = "172.30.55.12"
  1353  			svc.Spec.LoadBalancerIP = "5.6.7.8"
  1354  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
  1355  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
  1356  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  1357  				Ingress: []v1.LoadBalancerIngress{
  1358  					{IP: "5.6.7.8"},
  1359  				},
  1360  			}
  1361  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1362  			svc.Spec.HealthCheckNodePort = 345
  1363  		}),
  1364  	}
  1365  
  1366  	for i := range services {
  1367  		fp.OnServiceAdd(services[i])
  1368  	}
  1369  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1370  	if len(fp.svcPortMap) != 10 {
  1371  		t.Errorf("expected service map length 10, got %v", fp.svcPortMap)
  1372  	}
  1373  
  1374  	if len(result.DeletedUDPClusterIPs) != 0 {
  1375  		// Services only added, so nothing stale yet
  1376  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1377  	}
  1378  
  1379  	// Only the only-local-load-balancer service should have a healthcheck node port
  1380  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1381  	if len(healthCheckNodePorts) != 1 {
  1382  		t.Errorf("expected 1 healthcheck port, got %v", healthCheckNodePorts)
  1383  	} else {
  1384  		nsn := makeNSN("somewhere", "only-local-load-balancer")
  1385  		if port, found := healthCheckNodePorts[nsn]; !found || port != 345 {
  1386  			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, healthCheckNodePorts)
  1387  		}
  1388  	}
  1389  
  1390  	// Remove some stuff
  1391  	// oneService is a modification of services[0] with removed first port.
  1392  	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  1393  		svc.Spec.Type = v1.ServiceTypeClusterIP
  1394  		svc.Spec.ClusterIP = "172.30.55.4"
  1395  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  1396  	})
  1397  
  1398  	fp.OnServiceUpdate(services[0], oneService)
  1399  	fp.OnServiceDelete(services[1])
  1400  	fp.OnServiceDelete(services[2])
  1401  	fp.OnServiceDelete(services[3])
  1402  
  1403  	result = fp.svcPortMap.Update(fp.serviceChanges)
  1404  	if len(fp.svcPortMap) != 1 {
  1405  		t.Errorf("expected service map length 1, got %v", fp.svcPortMap)
  1406  	}
  1407  
  1408  	// All services but one were deleted. While you'd expect only the ClusterIPs
  1409  	// from the three deleted services here, we still have the ClusterIP for
  1410  	// the not-deleted service, because one of its UDP ServicePorts was deleted.
  1411  	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
  1412  	if len(result.DeletedUDPClusterIPs) != len(expectedStaleUDPServices) {
  1413  		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.DeletedUDPClusterIPs.UnsortedList())
  1414  	}
  1415  	for _, ip := range expectedStaleUDPServices {
  1416  		if !result.DeletedUDPClusterIPs.Has(ip) {
  1417  			t.Errorf("expected stale UDP service %s", ip)
  1418  		}
  1419  	}
  1420  
  1421  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  1422  	if len(healthCheckNodePorts) != 0 {
  1423  		t.Errorf("expected 0 healthcheck ports, got %v", healthCheckNodePorts)
  1424  	}
  1425  }
  1426  
  1427  func TestBuildServiceMapServiceHeadless(t *testing.T) {
  1428  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1429  
  1430  	makeServiceMap(fp,
  1431  		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
  1432  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1433  			svc.Spec.ClusterIP = v1.ClusterIPNone
  1434  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
  1435  		}),
  1436  		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
  1437  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1438  			svc.Spec.ClusterIP = v1.ClusterIPNone
  1439  		}),
  1440  	)
  1441  
  1442  	// Headless service should be ignored
  1443  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1444  	if len(fp.svcPortMap) != 0 {
  1445  		t.Errorf("expected service map length 0, got %d", len(fp.svcPortMap))
  1446  	}
  1447  
  1448  	if len(result.DeletedUDPClusterIPs) != 0 {
  1449  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1450  	}
  1451  
  1452  	// No proxied services, so no healthchecks
  1453  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1454  	if len(healthCheckNodePorts) != 0 {
  1455  		t.Errorf("expected healthcheck ports length 0, got %d", len(healthCheckNodePorts))
  1456  	}
  1457  }
  1458  
  1459  func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
  1460  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1461  
  1462  	makeServiceMap(fp,
  1463  		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
  1464  			svc.Spec.Type = v1.ServiceTypeExternalName
  1465  			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
  1466  			svc.Spec.ExternalName = "foo2.bar.com"
  1467  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
  1468  		}),
  1469  	)
  1470  
  1471  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1472  	if len(fp.svcPortMap) != 0 {
  1473  		t.Errorf("expected service map length 0, got %v", fp.svcPortMap)
  1474  	}
  1475  	if len(result.DeletedUDPClusterIPs) != 0 {
  1476  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs)
  1477  	}
  1478  	// No proxied services, so no healthchecks
  1479  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1480  	if len(healthCheckNodePorts) != 0 {
  1481  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  1482  	}
  1483  }
  1484  
  1485  func TestBuildServiceMapServiceUpdate(t *testing.T) {
  1486  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1487  
  1488  	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  1489  		svc.Spec.Type = v1.ServiceTypeClusterIP
  1490  		svc.Spec.ClusterIP = "172.30.55.4"
  1491  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  1492  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
  1493  	})
  1494  	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  1495  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1496  		svc.Spec.ClusterIP = "172.30.55.4"
  1497  		svc.Spec.LoadBalancerIP = "1.2.3.4"
  1498  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
  1499  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
  1500  		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  1501  			Ingress: []v1.LoadBalancerIngress{
  1502  				{IP: "1.2.3.4"},
  1503  			},
  1504  		}
  1505  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1506  		svc.Spec.HealthCheckNodePort = 345
  1507  	})
  1508  
  1509  	fp.OnServiceAdd(servicev1)
  1510  
  1511  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1512  	if len(fp.svcPortMap) != 2 {
  1513  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  1514  	}
  1515  	if len(result.DeletedUDPClusterIPs) != 0 {
  1516  		// Services only added, so nothing stale yet
  1517  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1518  	}
  1519  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1520  	if len(healthCheckNodePorts) != 0 {
  1521  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  1522  	}
  1523  
  1524  	// Change service to load-balancer
  1525  	fp.OnServiceUpdate(servicev1, servicev2)
  1526  	result = fp.svcPortMap.Update(fp.serviceChanges)
  1527  	if len(fp.svcPortMap) != 2 {
  1528  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  1529  	}
  1530  	if len(result.DeletedUDPClusterIPs) != 0 {
  1531  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  1532  	}
  1533  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  1534  	if len(healthCheckNodePorts) != 1 {
  1535  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  1536  	}
  1537  
  1538  	// No change; make sure the service map stays the same and there are
  1539  	// no health-check changes
  1540  	fp.OnServiceUpdate(servicev2, servicev2)
  1541  	result = fp.svcPortMap.Update(fp.serviceChanges)
  1542  	if len(fp.svcPortMap) != 2 {
  1543  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  1544  	}
  1545  	if len(result.DeletedUDPClusterIPs) != 0 {
  1546  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  1547  	}
  1548  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  1549  	if len(healthCheckNodePorts) != 1 {
  1550  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  1551  	}
  1552  
  1553  	// And back to ClusterIP
  1554  	fp.OnServiceUpdate(servicev2, servicev1)
  1555  	result = fp.svcPortMap.Update(fp.serviceChanges)
  1556  	if len(fp.svcPortMap) != 2 {
  1557  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  1558  	}
  1559  	if len(result.DeletedUDPClusterIPs) != 0 {
  1560  		// The ClusterIP is unchanged by the update, so nothing is stale
  1561  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1562  	}
  1563  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  1564  	if len(healthCheckNodePorts) != 0 {
  1565  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  1566  	}
  1567  }
  1568  
  1569  func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
  1570  	for i := range allEndpointSlices {
  1571  		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
  1572  	}
  1573  }
  1574  
  1575  func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
  1576  	eps := &discovery.EndpointSlice{
  1577  		ObjectMeta: metav1.ObjectMeta{
  1578  			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
  1579  			Namespace: namespace,
  1580  			Labels:    map[string]string{discovery.LabelServiceName: name},
  1581  		},
  1582  	}
  1583  	epsFunc(eps)
  1584  	return eps
  1585  }
  1586  
  1587  func makeNSN(namespace, name string) types.NamespacedName {
  1588  	return types.NamespacedName{Namespace: namespace, Name: name}
  1589  }
  1590  
  1591  func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
  1592  	return proxy.ServicePortName{
  1593  		NamespacedName: makeNSN(ns, name),
  1594  		Port:           port,
  1595  		Protocol:       protocol,
  1596  	}
  1597  }
  1598  
  1599  func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
  1600  	for i := range allServices {
  1601  		proxier.OnServiceAdd(allServices[i])
  1602  	}
  1603  
  1604  	proxier.mu.Lock()
  1605  	defer proxier.mu.Unlock()
  1606  	proxier.servicesSynced = true
  1607  }
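
// Illustrative sketch, not part of the original test file: makeServiceMap and
// populateEndpointSlices are typically combined to seed a fake proxier before a
// test forces a rules sync. The fixture values below are hypothetical.
func exampleSeedProxier(fp *Proxier) {
	makeServiceMap(fp,
		makeTestService("demo-ns", "demo-svc", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.99.1"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p80", "TCP", 80, 0, 80)
		}),
	)
	populateEndpointSlices(fp,
		makeTestEndpointSlice("demo-ns", "demo-svc", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{Addresses: []string{"10.0.2.1"}}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
	)
	// A test would then typically call fp.syncProxyRules() and inspect the
	// resulting nftables state.
}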
  1608  
  1609  type endpointExpectation struct {
  1610  	endpoint string
  1611  	isLocal  bool
  1612  }
  1613  
  1614  func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) {
  1615  	if len(newMap) != len(expected) {
  1616  		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
  1617  	}
  1618  	for x := range expected {
  1619  		if len(newMap[x]) != len(expected[x]) {
  1620  			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
  1621  		} else {
  1622  			for i := range expected[x] {
  1623  				newEp := newMap[x][i]
  1624  				if newEp.String() != expected[x][i].endpoint ||
  1625  					newEp.IsLocal() != expected[x][i].isLocal {
  1626  					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
  1627  				}
  1628  			}
  1629  		}
  1630  	}
  1631  }
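
// Illustrative sketch, not part of the original test file: an expectation map
// as consumed by checkEndpointExpectations above. "endpoint" is compared
// against the proxy endpoint's String() value ("IP:port") and "isLocal"
// against IsLocal(). The values below are hypothetical.
var _ = map[proxy.ServicePortName][]endpointExpectation{
	makeServicePortName("demo-ns", "demo-svc", "p80", v1.ProtocolTCP): {
		{endpoint: "10.0.2.1:80", isLocal: true},
	},
}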
  1632  
  1633  func TestUpdateEndpointsMap(t *testing.T) {
  1634  	emptyEndpointSlices := []*discovery.EndpointSlice{
  1635  		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
  1636  	}
  1637  	subset1 := func(eps *discovery.EndpointSlice) {
  1638  		eps.AddressType = discovery.AddressTypeIPv4
  1639  		eps.Endpoints = []discovery.Endpoint{{
  1640  			Addresses: []string{"10.1.1.1"},
  1641  		}}
  1642  		eps.Ports = []discovery.EndpointPort{{
  1643  			Name:     ptr.To("p11"),
  1644  			Port:     ptr.To[int32](11),
  1645  			Protocol: ptr.To(v1.ProtocolUDP),
  1646  		}}
  1647  	}
  1648  	subset2 := func(eps *discovery.EndpointSlice) {
  1649  		eps.AddressType = discovery.AddressTypeIPv4
  1650  		eps.Endpoints = []discovery.Endpoint{{
  1651  			Addresses: []string{"10.1.1.2"},
  1652  		}}
  1653  		eps.Ports = []discovery.EndpointPort{{
  1654  			Name:     ptr.To("p12"),
  1655  			Port:     ptr.To[int32](12),
  1656  			Protocol: ptr.To(v1.ProtocolUDP),
  1657  		}}
  1658  	}
  1659  	namedPortLocal := []*discovery.EndpointSlice{
  1660  		makeTestEndpointSlice("ns1", "ep1", 1,
  1661  			func(eps *discovery.EndpointSlice) {
  1662  				eps.AddressType = discovery.AddressTypeIPv4
  1663  				eps.Endpoints = []discovery.Endpoint{{
  1664  					Addresses: []string{"10.1.1.1"},
  1665  					NodeName:  ptr.To(testHostname),
  1666  				}}
  1667  				eps.Ports = []discovery.EndpointPort{{
  1668  					Name:     ptr.To("p11"),
  1669  					Port:     ptr.To[int32](11),
  1670  					Protocol: ptr.To(v1.ProtocolUDP),
  1671  				}}
  1672  			}),
  1673  	}
  1674  	namedPort := []*discovery.EndpointSlice{
  1675  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1676  	}
  1677  	namedPortRenamed := []*discovery.EndpointSlice{
  1678  		makeTestEndpointSlice("ns1", "ep1", 1,
  1679  			func(eps *discovery.EndpointSlice) {
  1680  				eps.AddressType = discovery.AddressTypeIPv4
  1681  				eps.Endpoints = []discovery.Endpoint{{
  1682  					Addresses: []string{"10.1.1.1"},
  1683  				}}
  1684  				eps.Ports = []discovery.EndpointPort{{
  1685  					Name:     ptr.To("p11-2"),
  1686  					Port:     ptr.To[int32](11),
  1687  					Protocol: ptr.To(v1.ProtocolUDP),
  1688  				}}
  1689  			}),
  1690  	}
  1691  	namedPortRenumbered := []*discovery.EndpointSlice{
  1692  		makeTestEndpointSlice("ns1", "ep1", 1,
  1693  			func(eps *discovery.EndpointSlice) {
  1694  				eps.AddressType = discovery.AddressTypeIPv4
  1695  				eps.Endpoints = []discovery.Endpoint{{
  1696  					Addresses: []string{"10.1.1.1"},
  1697  				}}
  1698  				eps.Ports = []discovery.EndpointPort{{
  1699  					Name:     ptr.To("p11"),
  1700  					Port:     ptr.To[int32](22),
  1701  					Protocol: ptr.To(v1.ProtocolUDP),
  1702  				}}
  1703  			}),
  1704  	}
  1705  	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
  1706  		makeTestEndpointSlice("ns1", "ep1", 1,
  1707  			func(eps *discovery.EndpointSlice) {
  1708  				eps.AddressType = discovery.AddressTypeIPv4
  1709  				eps.Endpoints = []discovery.Endpoint{{
  1710  					Addresses: []string{"10.1.1.1"},
  1711  				}, {
  1712  					Addresses: []string{"10.1.1.2"},
  1713  					NodeName:  ptr.To(testHostname),
  1714  				}}
  1715  				eps.Ports = []discovery.EndpointPort{{
  1716  					Name:     ptr.To("p11"),
  1717  					Port:     ptr.To[int32](11),
  1718  					Protocol: ptr.To(v1.ProtocolUDP),
  1719  				}, {
  1720  					Name:     ptr.To("p12"),
  1721  					Port:     ptr.To[int32](12),
  1722  					Protocol: ptr.To(v1.ProtocolUDP),
  1723  				}}
  1724  			}),
  1725  	}
  1726  	multipleSubsets := []*discovery.EndpointSlice{
  1727  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1728  		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
  1729  	}
  1730  	subsetLocal := func(eps *discovery.EndpointSlice) {
  1731  		eps.AddressType = discovery.AddressTypeIPv4
  1732  		eps.Endpoints = []discovery.Endpoint{{
  1733  			Addresses: []string{"10.1.1.2"},
  1734  			NodeName:  ptr.To(testHostname),
  1735  		}}
  1736  		eps.Ports = []discovery.EndpointPort{{
  1737  			Name:     ptr.To("p12"),
  1738  			Port:     ptr.To[int32](12),
  1739  			Protocol: ptr.To(v1.ProtocolUDP),
  1740  		}}
  1741  	}
  1742  	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
  1743  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1744  		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
  1745  	}
  1746  	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
  1747  		eps.AddressType = discovery.AddressTypeIPv4
  1748  		eps.Endpoints = []discovery.Endpoint{{
  1749  			Addresses: []string{"10.1.1.1"},
  1750  			NodeName:  ptr.To(testHostname),
  1751  		}}
  1752  		eps.Ports = []discovery.EndpointPort{{
  1753  			Name:     ptr.To("p11"),
  1754  			Port:     ptr.To[int32](11),
  1755  			Protocol: ptr.To(v1.ProtocolUDP),
  1756  		}, {
  1757  			Name:     ptr.To("p12"),
  1758  			Port:     ptr.To[int32](12),
  1759  			Protocol: ptr.To(v1.ProtocolUDP),
  1760  		}}
  1761  	}
  1762  	subset3 := func(eps *discovery.EndpointSlice) {
  1763  		eps.AddressType = discovery.AddressTypeIPv4
  1764  		eps.Endpoints = []discovery.Endpoint{{
  1765  			Addresses: []string{"10.1.1.3"},
  1766  		}}
  1767  		eps.Ports = []discovery.EndpointPort{{
  1768  			Name:     ptr.To("p13"),
  1769  			Port:     ptr.To[int32](13),
  1770  			Protocol: ptr.To(v1.ProtocolUDP),
  1771  		}}
  1772  	}
  1773  	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
  1774  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
  1775  		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
  1776  	}
  1777  	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
  1778  		eps.AddressType = discovery.AddressTypeIPv4
  1779  		eps.Endpoints = []discovery.Endpoint{{
  1780  			Addresses: []string{"10.1.1.1"},
  1781  		}, {
  1782  			Addresses: []string{"10.1.1.2"},
  1783  			NodeName:  ptr.To(testHostname),
  1784  		}}
  1785  		eps.Ports = []discovery.EndpointPort{{
  1786  			Name:     ptr.To("p11"),
  1787  			Port:     ptr.To[int32](11),
  1788  			Protocol: ptr.To(v1.ProtocolUDP),
  1789  		}, {
  1790  			Name:     ptr.To("p12"),
  1791  			Port:     ptr.To[int32](12),
  1792  			Protocol: ptr.To(v1.ProtocolUDP),
  1793  		}}
  1794  	}
  1795  	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
  1796  		eps.AddressType = discovery.AddressTypeIPv4
  1797  		eps.Endpoints = []discovery.Endpoint{{
  1798  			Addresses: []string{"10.1.1.3"},
  1799  		}, {
  1800  			Addresses: []string{"10.1.1.4"},
  1801  			NodeName:  ptr.To(testHostname),
  1802  		}}
  1803  		eps.Ports = []discovery.EndpointPort{{
  1804  			Name:     ptr.To("p13"),
  1805  			Port:     ptr.To[int32](13),
  1806  			Protocol: ptr.To(v1.ProtocolUDP),
  1807  		}, {
  1808  			Name:     ptr.To("p14"),
  1809  			Port:     ptr.To[int32](14),
  1810  			Protocol: ptr.To(v1.ProtocolUDP),
  1811  		}}
  1812  	}
  1813  	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
  1814  		eps.AddressType = discovery.AddressTypeIPv4
  1815  		eps.Endpoints = []discovery.Endpoint{{
  1816  			Addresses: []string{"10.2.2.1"},
  1817  		}, {
  1818  			Addresses: []string{"10.2.2.2"},
  1819  			NodeName:  ptr.To(testHostname),
  1820  		}}
  1821  		eps.Ports = []discovery.EndpointPort{{
  1822  			Name:     ptr.To("p21"),
  1823  			Port:     ptr.To[int32](21),
  1824  			Protocol: ptr.To(v1.ProtocolUDP),
  1825  		}, {
  1826  			Name:     ptr.To("p22"),
  1827  			Port:     ptr.To[int32](22),
  1828  			Protocol: ptr.To(v1.ProtocolUDP),
  1829  		}}
  1830  	}
  1831  	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
  1832  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
  1833  		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
  1834  		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
  1835  	}
  1836  	complexSubset1 := func(eps *discovery.EndpointSlice) {
  1837  		eps.AddressType = discovery.AddressTypeIPv4
  1838  		eps.Endpoints = []discovery.Endpoint{{
  1839  			Addresses: []string{"10.2.2.2"},
  1840  			NodeName:  ptr.To(testHostname),
  1841  		}, {
  1842  			Addresses: []string{"10.2.2.22"},
  1843  			NodeName:  ptr.To(testHostname),
  1844  		}}
  1845  		eps.Ports = []discovery.EndpointPort{{
  1846  			Name:     ptr.To("p22"),
  1847  			Port:     ptr.To[int32](22),
  1848  			Protocol: ptr.To(v1.ProtocolUDP),
  1849  		}}
  1850  	}
  1851  	complexSubset2 := func(eps *discovery.EndpointSlice) {
  1852  		eps.AddressType = discovery.AddressTypeIPv4
  1853  		eps.Endpoints = []discovery.Endpoint{{
  1854  			Addresses: []string{"10.2.2.3"},
  1855  			NodeName:  ptr.To(testHostname),
  1856  		}}
  1857  		eps.Ports = []discovery.EndpointPort{{
  1858  			Name:     ptr.To("p23"),
  1859  			Port:     ptr.To[int32](23),
  1860  			Protocol: ptr.To(v1.ProtocolUDP),
  1861  		}}
  1862  	}
  1863  	complexSubset3 := func(eps *discovery.EndpointSlice) {
  1864  		eps.AddressType = discovery.AddressTypeIPv4
  1865  		eps.Endpoints = []discovery.Endpoint{{
  1866  			Addresses: []string{"10.4.4.4"},
  1867  			NodeName:  ptr.To(testHostname),
  1868  		}, {
  1869  			Addresses: []string{"10.4.4.5"},
  1870  			NodeName:  ptr.To(testHostname),
  1871  		}}
  1872  		eps.Ports = []discovery.EndpointPort{{
  1873  			Name:     ptr.To("p44"),
  1874  			Port:     ptr.To[int32](44),
  1875  			Protocol: ptr.To(v1.ProtocolUDP),
  1876  		}}
  1877  	}
  1878  	complexSubset4 := func(eps *discovery.EndpointSlice) {
  1879  		eps.AddressType = discovery.AddressTypeIPv4
  1880  		eps.Endpoints = []discovery.Endpoint{{
  1881  			Addresses: []string{"10.4.4.6"},
  1882  			NodeName:  ptr.To(testHostname),
  1883  		}}
  1884  		eps.Ports = []discovery.EndpointPort{{
  1885  			Name:     ptr.To("p45"),
  1886  			Port:     ptr.To[int32](45),
  1887  			Protocol: ptr.To(v1.ProtocolUDP),
  1888  		}}
  1889  	}
  1890  	complexSubset5 := func(eps *discovery.EndpointSlice) {
  1891  		eps.AddressType = discovery.AddressTypeIPv4
  1892  		eps.Endpoints = []discovery.Endpoint{{
  1893  			Addresses: []string{"10.1.1.1"},
  1894  		}, {
  1895  			Addresses: []string{"10.1.1.11"},
  1896  		}}
  1897  		eps.Ports = []discovery.EndpointPort{{
  1898  			Name:     ptr.To("p11"),
  1899  			Port:     ptr.To[int32](11),
  1900  			Protocol: ptr.To(v1.ProtocolUDP),
  1901  		}}
  1902  	}
  1903  	complexSubset6 := func(eps *discovery.EndpointSlice) {
  1904  		eps.AddressType = discovery.AddressTypeIPv4
  1905  		eps.Endpoints = []discovery.Endpoint{{
  1906  			Addresses: []string{"10.1.1.2"},
  1907  		}}
  1908  		eps.Ports = []discovery.EndpointPort{{
  1909  			Name:     ptr.To("p12"),
  1910  			Port:     ptr.To[int32](12),
  1911  			Protocol: ptr.To(v1.ProtocolUDP),
  1912  		}, {
  1913  			Name:     ptr.To("p122"),
  1914  			Port:     ptr.To[int32](122),
  1915  			Protocol: ptr.To(v1.ProtocolUDP),
  1916  		}}
  1917  	}
  1918  	complexSubset7 := func(eps *discovery.EndpointSlice) {
  1919  		eps.AddressType = discovery.AddressTypeIPv4
  1920  		eps.Endpoints = []discovery.Endpoint{{
  1921  			Addresses: []string{"10.3.3.3"},
  1922  		}}
  1923  		eps.Ports = []discovery.EndpointPort{{
  1924  			Name:     ptr.To("p33"),
  1925  			Port:     ptr.To[int32](33),
  1926  			Protocol: ptr.To(v1.ProtocolUDP),
  1927  		}}
  1928  	}
  1929  	complexSubset8 := func(eps *discovery.EndpointSlice) {
  1930  		eps.AddressType = discovery.AddressTypeIPv4
  1931  		eps.Endpoints = []discovery.Endpoint{{
  1932  			Addresses: []string{"10.4.4.4"},
  1933  			NodeName:  ptr.To(testHostname),
  1934  		}}
  1935  		eps.Ports = []discovery.EndpointPort{{
  1936  			Name:     ptr.To("p44"),
  1937  			Port:     ptr.To[int32](44),
  1938  			Protocol: ptr.To(v1.ProtocolUDP),
  1939  		}}
  1940  	}
  1941  	complexBefore := []*discovery.EndpointSlice{
  1942  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1943  		nil,
  1944  		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
  1945  		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
  1946  		nil,
  1947  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
  1948  		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
  1949  	}
  1950  	complexAfter := []*discovery.EndpointSlice{
  1951  		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
  1952  		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
  1953  		nil,
  1954  		nil,
  1955  		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
  1956  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
  1957  		nil,
  1958  	}
  1959  
  1960  	testCases := []struct {
  1961  		// previousEndpoints and currentEndpoints are used to call the appropriate
  1962  		// OnEndpointSlice* handlers (based on whether the corresponding values are
  1963  		// nil or non-nil) and must be of equal length.
  1964  		name                           string
  1965  		previousEndpoints              []*discovery.EndpointSlice
  1966  		currentEndpoints               []*discovery.EndpointSlice
  1967  		oldEndpoints                   map[proxy.ServicePortName][]endpointExpectation
  1968  		expectedResult                 map[proxy.ServicePortName][]endpointExpectation
  1969  		expectedDeletedUDPEndpoints    []proxy.ServiceEndpoint
  1970  		expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool
  1971  		expectedLocalEndpoints         map[types.NamespacedName]int
  1972  	}{{
  1973  		// Case[0]: nothing
  1974  		name:                           "nothing",
  1975  		oldEndpoints:                   map[proxy.ServicePortName][]endpointExpectation{},
  1976  		expectedResult:                 map[proxy.ServicePortName][]endpointExpectation{},
  1977  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  1978  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  1979  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  1980  	}, {
  1981  		// Case[1]: no change, named port, local
  1982  		name:              "no change, named port, local",
  1983  		previousEndpoints: namedPortLocal,
  1984  		currentEndpoints:  namedPortLocal,
  1985  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  1986  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  1987  				{endpoint: "10.1.1.1:11", isLocal: true},
  1988  			},
  1989  		},
  1990  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  1991  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  1992  				{endpoint: "10.1.1.1:11", isLocal: true},
  1993  			},
  1994  		},
  1995  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  1996  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  1997  		expectedLocalEndpoints: map[types.NamespacedName]int{
  1998  			makeNSN("ns1", "ep1"): 1,
  1999  		},
  2000  	}, {
  2001  		// Case[2]: no change, multiple subsets
  2002  		name:              "no change, multiple subsets",
  2003  		previousEndpoints: multipleSubsets,
  2004  		currentEndpoints:  multipleSubsets,
  2005  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2006  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2007  				{endpoint: "10.1.1.1:11", isLocal: false},
  2008  			},
  2009  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2010  				{endpoint: "10.1.1.2:12", isLocal: false},
  2011  			},
  2012  		},
  2013  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2014  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2015  				{endpoint: "10.1.1.1:11", isLocal: false},
  2016  			},
  2017  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2018  				{endpoint: "10.1.1.2:12", isLocal: false},
  2019  			},
  2020  		},
  2021  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2022  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2023  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2024  	}, {
  2025  		// Case[3]: no change, multiple subsets, multiple ports, local
  2026  		name:              "no change, multiple subsets, multiple ports, local",
  2027  		previousEndpoints: multipleSubsetsMultiplePortsLocal,
  2028  		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
  2029  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2030  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2031  				{endpoint: "10.1.1.1:11", isLocal: true},
  2032  			},
  2033  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2034  				{endpoint: "10.1.1.1:12", isLocal: true},
  2035  			},
  2036  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2037  				{endpoint: "10.1.1.3:13", isLocal: false},
  2038  			},
  2039  		},
  2040  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2041  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2042  				{endpoint: "10.1.1.1:11", isLocal: true},
  2043  			},
  2044  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2045  				{endpoint: "10.1.1.1:12", isLocal: true},
  2046  			},
  2047  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2048  				{endpoint: "10.1.1.3:13", isLocal: false},
  2049  			},
  2050  		},
  2051  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2052  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2053  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2054  			makeNSN("ns1", "ep1"): 1,
  2055  		},
  2056  	}, {
  2057  		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
  2058  		name:              "no change, multiple endpoints, subsets, IPs, and ports",
  2059  		previousEndpoints: multipleSubsetsIPsPorts,
  2060  		currentEndpoints:  multipleSubsetsIPsPorts,
  2061  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2062  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2063  				{endpoint: "10.1.1.1:11", isLocal: false},
  2064  				{endpoint: "10.1.1.2:11", isLocal: true},
  2065  			},
  2066  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2067  				{endpoint: "10.1.1.1:12", isLocal: false},
  2068  				{endpoint: "10.1.1.2:12", isLocal: true},
  2069  			},
  2070  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2071  				{endpoint: "10.1.1.3:13", isLocal: false},
  2072  				{endpoint: "10.1.1.4:13", isLocal: true},
  2073  			},
  2074  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  2075  				{endpoint: "10.1.1.3:14", isLocal: false},
  2076  				{endpoint: "10.1.1.4:14", isLocal: true},
  2077  			},
  2078  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  2079  				{endpoint: "10.2.2.1:21", isLocal: false},
  2080  				{endpoint: "10.2.2.2:21", isLocal: true},
  2081  			},
  2082  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2083  				{endpoint: "10.2.2.1:22", isLocal: false},
  2084  				{endpoint: "10.2.2.2:22", isLocal: true},
  2085  			},
  2086  		},
  2087  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2088  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2089  				{endpoint: "10.1.1.1:11", isLocal: false},
  2090  				{endpoint: "10.1.1.2:11", isLocal: true},
  2091  			},
  2092  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2093  				{endpoint: "10.1.1.1:12", isLocal: false},
  2094  				{endpoint: "10.1.1.2:12", isLocal: true},
  2095  			},
  2096  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2097  				{endpoint: "10.1.1.3:13", isLocal: false},
  2098  				{endpoint: "10.1.1.4:13", isLocal: true},
  2099  			},
  2100  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  2101  				{endpoint: "10.1.1.3:14", isLocal: false},
  2102  				{endpoint: "10.1.1.4:14", isLocal: true},
  2103  			},
  2104  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  2105  				{endpoint: "10.2.2.1:21", isLocal: false},
  2106  				{endpoint: "10.2.2.2:21", isLocal: true},
  2107  			},
  2108  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2109  				{endpoint: "10.2.2.1:22", isLocal: false},
  2110  				{endpoint: "10.2.2.2:22", isLocal: true},
  2111  			},
  2112  		},
  2113  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2114  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2115  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2116  			makeNSN("ns1", "ep1"): 2,
  2117  			makeNSN("ns2", "ep2"): 1,
  2118  		},
  2119  	}, {
  2120  		// Case[5]: add an Endpoints
  2121  		name:              "add an Endpoints",
  2122  		previousEndpoints: []*discovery.EndpointSlice{nil},
  2123  		currentEndpoints:  namedPortLocal,
  2124  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  2125  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2126  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2127  				{endpoint: "10.1.1.1:11", isLocal: true},
  2128  			},
  2129  		},
  2130  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2131  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2132  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  2133  		},
  2134  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2135  			makeNSN("ns1", "ep1"): 1,
  2136  		},
  2137  	}, {
  2138  		// Case[6]: remove an Endpoints
  2139  		name:              "remove an Endpoints",
  2140  		previousEndpoints: namedPortLocal,
  2141  		currentEndpoints:  []*discovery.EndpointSlice{nil},
  2142  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2143  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2144  				{endpoint: "10.1.1.1:11", isLocal: true},
  2145  			},
  2146  		},
  2147  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{},
  2148  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2149  			Endpoint:        "10.1.1.1:11",
  2150  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2151  		}},
  2152  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2153  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2154  	}, {
  2155  		// Case[7]: add an IP and port
  2156  		name:              "add an IP and port",
  2157  		previousEndpoints: namedPort,
  2158  		currentEndpoints:  namedPortsLocalNoLocal,
  2159  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2160  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2161  				{endpoint: "10.1.1.1:11", isLocal: false},
  2162  			},
  2163  		},
  2164  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2165  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2166  				{endpoint: "10.1.1.1:11", isLocal: false},
  2167  				{endpoint: "10.1.1.2:11", isLocal: true},
  2168  			},
  2169  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2170  				{endpoint: "10.1.1.1:12", isLocal: false},
  2171  				{endpoint: "10.1.1.2:12", isLocal: true},
  2172  			},
  2173  		},
  2174  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2175  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2176  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  2177  		},
  2178  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2179  			makeNSN("ns1", "ep1"): 1,
  2180  		},
  2181  	}, {
  2182  		// Case[8]: remove an IP and port
  2183  		name:              "remove an IP and port",
  2184  		previousEndpoints: namedPortsLocalNoLocal,
  2185  		currentEndpoints:  namedPort,
  2186  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2187  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2188  				{endpoint: "10.1.1.1:11", isLocal: false},
  2189  				{endpoint: "10.1.1.2:11", isLocal: true},
  2190  			},
  2191  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2192  				{endpoint: "10.1.1.1:12", isLocal: false},
  2193  				{endpoint: "10.1.1.2:12", isLocal: true},
  2194  			},
  2195  		},
  2196  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2197  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2198  				{endpoint: "10.1.1.1:11", isLocal: false},
  2199  			},
  2200  		},
  2201  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2202  			Endpoint:        "10.1.1.2:11",
  2203  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2204  		}, {
  2205  			Endpoint:        "10.1.1.1:12",
  2206  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2207  		}, {
  2208  			Endpoint:        "10.1.1.2:12",
  2209  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2210  		}},
  2211  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2212  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2213  	}, {
  2214  		// Case[9]: add a subset
  2215  		name:              "add a subset",
  2216  		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
  2217  		currentEndpoints:  multipleSubsetsWithLocal,
  2218  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2219  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2220  				{endpoint: "10.1.1.1:11", isLocal: false},
  2221  			},
  2222  		},
  2223  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2224  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2225  				{endpoint: "10.1.1.1:11", isLocal: false},
  2226  			},
  2227  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2228  				{endpoint: "10.1.1.2:12", isLocal: true},
  2229  			},
  2230  		},
  2231  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2232  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2233  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  2234  		},
  2235  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2236  			makeNSN("ns1", "ep1"): 1,
  2237  		},
  2238  	}, {
  2239  		// Case[10]: remove a subset
  2240  		name:              "remove a subset",
  2241  		previousEndpoints: multipleSubsets,
  2242  		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
  2243  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2244  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2245  				{endpoint: "10.1.1.1:11", isLocal: false},
  2246  			},
  2247  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2248  				{endpoint: "10.1.1.2:12", isLocal: false},
  2249  			},
  2250  		},
  2251  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2252  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2253  				{endpoint: "10.1.1.1:11", isLocal: false},
  2254  			},
  2255  		},
  2256  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2257  			Endpoint:        "10.1.1.2:12",
  2258  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2259  		}},
  2260  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2261  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2262  	}, {
  2263  		// Case[11]: rename a port
  2264  		name:              "rename a port",
  2265  		previousEndpoints: namedPort,
  2266  		currentEndpoints:  namedPortRenamed,
  2267  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2268  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2269  				{endpoint: "10.1.1.1:11", isLocal: false},
  2270  			},
  2271  		},
  2272  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2273  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
  2274  				{endpoint: "10.1.1.1:11", isLocal: false},
  2275  			},
  2276  		},
  2277  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2278  			Endpoint:        "10.1.1.1:11",
  2279  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2280  		}},
  2281  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2282  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
  2283  		},
  2284  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  2285  	}, {
  2286  		// Case[12]: renumber a port
  2287  		name:              "renumber a port",
  2288  		previousEndpoints: namedPort,
  2289  		currentEndpoints:  namedPortRenumbered,
  2290  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2291  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2292  				{endpoint: "10.1.1.1:11", isLocal: false},
  2293  			},
  2294  		},
  2295  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2296  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2297  				{endpoint: "10.1.1.1:22", isLocal: false},
  2298  			},
  2299  		},
  2300  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2301  			Endpoint:        "10.1.1.1:11",
  2302  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2303  		}},
  2304  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2305  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2306  	}, {
  2307  		// Case[13]: complex add and remove
  2308  		name:              "complex add and remove",
  2309  		previousEndpoints: complexBefore,
  2310  		currentEndpoints:  complexAfter,
  2311  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2312  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2313  				{endpoint: "10.1.1.1:11", isLocal: false},
  2314  			},
  2315  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2316  				{endpoint: "10.2.2.22:22", isLocal: true},
  2317  				{endpoint: "10.2.2.2:22", isLocal: true},
  2318  			},
  2319  			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
  2320  				{endpoint: "10.2.2.3:23", isLocal: true},
  2321  			},
  2322  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  2323  				{endpoint: "10.4.4.4:44", isLocal: true},
  2324  				{endpoint: "10.4.4.5:44", isLocal: true},
  2325  			},
  2326  			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
  2327  				{endpoint: "10.4.4.6:45", isLocal: true},
  2328  			},
  2329  		},
  2330  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2331  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2332  				{endpoint: "10.1.1.11:11", isLocal: false},
  2333  				{endpoint: "10.1.1.1:11", isLocal: false},
  2334  			},
  2335  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2336  				{endpoint: "10.1.1.2:12", isLocal: false},
  2337  			},
  2338  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
  2339  				{endpoint: "10.1.1.2:122", isLocal: false},
  2340  			},
  2341  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
  2342  				{endpoint: "10.3.3.3:33", isLocal: false},
  2343  			},
  2344  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  2345  				{endpoint: "10.4.4.4:44", isLocal: true},
  2346  			},
  2347  		},
  2348  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2349  			Endpoint:        "10.2.2.2:22",
  2350  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  2351  		}, {
  2352  			Endpoint:        "10.2.2.22:22",
  2353  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  2354  		}, {
  2355  			Endpoint:        "10.2.2.3:23",
  2356  			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
  2357  		}, {
  2358  			Endpoint:        "10.4.4.5:44",
  2359  			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
  2360  		}, {
  2361  			Endpoint:        "10.4.4.6:45",
  2362  			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
  2363  		}},
  2364  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2365  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
  2366  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
  2367  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
  2368  		},
  2369  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2370  			makeNSN("ns4", "ep4"): 1,
  2371  		},
  2372  	}, {
  2373  		// Case[14]: change from 0 endpoint addresses to 1 named port
  2374  		name:              "change from 0 endpoint addresses to 1 named port",
  2375  		previousEndpoints: emptyEndpointSlices,
  2376  		currentEndpoints:  namedPort,
  2377  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  2378  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2379  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2380  				{endpoint: "10.1.1.1:11", isLocal: false},
  2381  			},
  2382  		},
  2383  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2384  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2385  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  2386  		},
  2387  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  2388  	},
  2389  	}
  2390  
  2391  	for tci, tc := range testCases {
  2392  		t.Run(tc.name, func(t *testing.T) {
  2393  			_, fp := NewFakeProxier(v1.IPv4Protocol)
  2394  			fp.hostname = testHostname
  2395  
  2396  			// First check that after adding all previous versions of endpoints,
  2397  			// fp.endpointsMap matches tc.oldEndpoints.
  2398  			for i := range tc.previousEndpoints {
  2399  				if tc.previousEndpoints[i] != nil {
  2400  					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
  2401  				}
  2402  			}
  2403  			fp.endpointsMap.Update(fp.endpointsChanges)
  2404  			checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints)
  2405  
  2406  			// Now let's call the appropriate handlers to get to the state we want to be in.
  2407  			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
  2408  				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
  2409  			}
  2410  
  2411  			for i := range tc.previousEndpoints {
  2412  				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
  2413  				switch {
  2414  				case prev == nil:
  2415  					fp.OnEndpointSliceAdd(curr)
  2416  				case curr == nil:
  2417  					fp.OnEndpointSliceDelete(prev)
  2418  				default:
  2419  					fp.OnEndpointSliceUpdate(prev, curr)
  2420  				}
  2421  			}
  2422  			result := fp.endpointsMap.Update(fp.endpointsChanges)
  2423  			newMap := fp.endpointsMap
  2424  			checkEndpointExpectations(t, tci, newMap, tc.expectedResult)
  2425  			if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) {
  2426  				t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints)
  2427  			}
  2428  			for _, x := range tc.expectedDeletedUDPEndpoints {
  2429  				found := false
  2430  				for _, stale := range result.DeletedUDPEndpoints {
  2431  					if stale == x {
  2432  						found = true
  2433  						break
  2434  					}
  2435  				}
  2436  				if !found {
  2437  					t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.DeletedUDPEndpoints)
  2438  				}
  2439  			}
  2440  			if len(result.NewlyActiveUDPServices) != len(tc.expectedNewlyActiveUDPServices) {
  2441  				t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedNewlyActiveUDPServices), len(result.NewlyActiveUDPServices), result.NewlyActiveUDPServices)
  2442  			}
  2443  			for svcName := range tc.expectedNewlyActiveUDPServices {
  2444  				found := false
  2445  				for _, stale := range result.NewlyActiveUDPServices {
  2446  					if stale == svcName {
  2447  						found = true
  2448  					}
  2449  				}
  2450  				if !found {
  2451  					t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.NewlyActiveUDPServices)
  2452  				}
  2453  			}
  2454  			localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  2455  			if !reflect.DeepEqual(localReadyEndpoints, tc.expectedLocalEndpoints) {
  2456  				t.Errorf("[%d] expected local endpoints %v, got %v", tci, tc.expectedLocalEndpoints, localReadyEndpoints)
  2457  			}
  2458  		})
  2459  	}
  2460  }
  2461  
  2462  // TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
  2463  func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
  2464  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  2465  	fp.OnServiceSynced()
  2466  	fp.OnEndpointSlicesSynced()
  2467  
  2468  	serviceName := "svc1"
  2469  	namespaceName := "ns1"
  2470  
  2471  	fp.OnServiceAdd(&v1.Service{
  2472  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  2473  		Spec: v1.ServiceSpec{
  2474  			ClusterIP: "172.30.1.1",
  2475  			Selector:  map[string]string{"foo": "bar"},
  2476  			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
  2477  		},
  2478  	})
  2479  
  2480  	endpointSlice := &discovery.EndpointSlice{
  2481  		ObjectMeta: metav1.ObjectMeta{
  2482  			Name:      fmt.Sprintf("%s-1", serviceName),
  2483  			Namespace: namespaceName,
  2484  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  2485  		},
  2486  		Ports: []discovery.EndpointPort{{
  2487  			Name:     ptr.To(""),
  2488  			Port:     ptr.To[int32](80),
  2489  			Protocol: ptr.To(v1.ProtocolTCP),
  2490  		}},
  2491  		AddressType: discovery.AddressTypeIPv4,
  2492  		Endpoints: []discovery.Endpoint{{
  2493  			Addresses:  []string{"10.0.1.1"},
  2494  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2495  			NodeName:   ptr.To(testHostname),
  2496  		}, {
  2497  			Addresses:  []string{"10.0.1.2"},
  2498  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2499  			NodeName:   ptr.To(testHostname),
  2500  		}, {
  2501  			Addresses:  []string{"10.0.1.3"},
  2502  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2503  			NodeName:   ptr.To(testHostname),
  2504  		}, { // not ready endpoints should be ignored
  2505  			Addresses:  []string{"10.0.1.4"},
  2506  			Conditions: discovery.EndpointConditions{Ready: ptr.To(false)},
  2507  			NodeName:   ptr.To(testHostname),
  2508  		}},
  2509  	}
  2510  
  2511  	fp.OnEndpointSliceAdd(endpointSlice)
  2512  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  2513  	localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  2514  	if len(localReadyEndpoints) != 1 {
  2515  		t.Errorf("unexpected number of local ready endpoints, expected 1 but got: %d", len(localReadyEndpoints))
  2516  	}
  2517  
  2518  	// set all endpoints to terminating
  2519  	endpointSliceTerminating := &discovery.EndpointSlice{
  2520  		ObjectMeta: metav1.ObjectMeta{
  2521  			Name:      fmt.Sprintf("%s-1", serviceName),
  2522  			Namespace: namespaceName,
  2523  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  2524  		},
  2525  		Ports: []discovery.EndpointPort{{
  2526  			Name:     ptr.To(""),
  2527  			Port:     ptr.To[int32](80),
  2528  			Protocol: ptr.To(v1.ProtocolTCP),
  2529  		}},
  2530  		AddressType: discovery.AddressTypeIPv4,
  2531  		Endpoints: []discovery.Endpoint{{
  2532  			Addresses: []string{"10.0.1.1"},
  2533  			Conditions: discovery.EndpointConditions{
  2534  				Ready:       ptr.To(false),
  2535  				Serving:     ptr.To(true),
  2536  				Terminating: ptr.To(false),
  2537  			},
  2538  			NodeName: ptr.To(testHostname),
  2539  		}, {
  2540  			Addresses: []string{"10.0.1.2"},
  2541  			Conditions: discovery.EndpointConditions{
  2542  				Ready:       ptr.To(false),
  2543  				Serving:     ptr.To(true),
  2544  				Terminating: ptr.To(true),
  2545  			},
  2546  			NodeName: ptr.To(testHostname),
  2547  		}, {
  2548  			Addresses: []string{"10.0.1.3"},
  2549  			Conditions: discovery.EndpointConditions{
  2550  				Ready:       ptr.To(false),
  2551  				Serving:     ptr.To(true),
  2552  				Terminating: ptr.To(true),
  2553  			},
  2554  			NodeName: ptr.To(testHostname),
  2555  		}, { // endpoints that are neither ready nor serving are ignored
  2556  			Addresses: []string{"10.0.1.4"},
  2557  			Conditions: discovery.EndpointConditions{
  2558  				Ready:       ptr.To(false),
  2559  				Serving:     ptr.To(false),
  2560  				Terminating: ptr.To(true),
  2561  			},
  2562  			NodeName: ptr.To(testHostname),
  2563  		}},
  2564  	}
  2565  
  2566  	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
  2567  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  2568  	localReadyEndpoints = fp.endpointsMap.LocalReadyEndpoints()
  2569  	if len(localReadyEndpoints) != 0 {
  2570  		t.Errorf("unexpected number of local ready endpoints, expected 0 but got: %d", len(localReadyEndpoints))
  2571  	}
  2572  }
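
// Hedged sketch, not the proxy package's actual helper: the test above relies
// on the convention that an endpoint only counts as "ready" when its Ready
// condition is unset or explicitly true, which is why endpoints that are
// Serving but Terminating no longer show up in LocalReadyEndpoints().
func exampleIsEndpointReady(ep discovery.Endpoint) bool {
	// Per the EndpointConditions API, a nil Ready condition is interpreted as
	// ready; an explicit false (as on the terminating endpoints above) is not.
	return ep.Conditions.Ready == nil || *ep.Conditions.Ready
}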
  2573  
  2574  // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
  2575  
  2576  // This test ensures that the nftables proxier supports translating Endpoints to
  2577  // nftables rules when internalTrafficPolicy is specified
  2578  func TestInternalTrafficPolicy(t *testing.T) {
  2579  	type endpoint struct {
  2580  		ip       string
  2581  		hostname string
  2582  	}
  2583  
  2584  	testCases := []struct {
  2585  		name                  string
  2586  		line                  string
  2587  		internalTrafficPolicy *v1.ServiceInternalTrafficPolicy
  2588  		endpoints             []endpoint
  2589  		flowTests             []packetFlowTest
  2590  	}{
  2591  		{
  2592  			name:                  "internalTrafficPolicy is cluster",
  2593  			line:                  getLine(),
  2594  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster),
  2595  			endpoints: []endpoint{
  2596  				{"10.0.1.1", testHostname},
  2597  				{"10.0.1.2", "host1"},
  2598  				{"10.0.1.3", "host2"},
  2599  			},
  2600  			flowTests: []packetFlowTest{
  2601  				{
  2602  					name:     "pod to ClusterIP hits all endpoints",
  2603  					sourceIP: "10.0.0.2",
  2604  					destIP:   "172.30.1.1",
  2605  					destPort: 80,
  2606  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
  2607  					masq:     false,
  2608  				},
  2609  			},
  2610  		},
  2611  		{
  2612  			name:                  "internalTrafficPolicy is local and there is one local endpoint",
  2613  			line:                  getLine(),
  2614  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  2615  			endpoints: []endpoint{
  2616  				{"10.0.1.1", testHostname},
  2617  				{"10.0.1.2", "host1"},
  2618  				{"10.0.1.3", "host2"},
  2619  			},
  2620  			flowTests: []packetFlowTest{
  2621  				{
  2622  					name:     "pod to ClusterIP hits only local endpoint",
  2623  					sourceIP: "10.0.0.2",
  2624  					destIP:   "172.30.1.1",
  2625  					destPort: 80,
  2626  					output:   "10.0.1.1:80",
  2627  					masq:     false,
  2628  				},
  2629  			},
  2630  		},
  2631  		{
  2632  			name:                  "internalTrafficPolicy is local and there are multiple local endpoints",
  2633  			line:                  getLine(),
  2634  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  2635  			endpoints: []endpoint{
  2636  				{"10.0.1.1", testHostname},
  2637  				{"10.0.1.2", testHostname},
  2638  				{"10.0.1.3", "host2"},
  2639  			},
  2640  			flowTests: []packetFlowTest{
  2641  				{
  2642  					name:     "pod to ClusterIP hits all local endpoints",
  2643  					sourceIP: "10.0.0.2",
  2644  					destIP:   "172.30.1.1",
  2645  					destPort: 80,
  2646  					output:   "10.0.1.1:80, 10.0.1.2:80",
  2647  					masq:     false,
  2648  				},
  2649  			},
  2650  		},
  2651  		{
  2652  			name:                  "internalTrafficPolicy is local and there are no local endpoints",
  2653  			line:                  getLine(),
  2654  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  2655  			endpoints: []endpoint{
  2656  				{"10.0.1.1", "host0"},
  2657  				{"10.0.1.2", "host1"},
  2658  				{"10.0.1.3", "host2"},
  2659  			},
  2660  			flowTests: []packetFlowTest{
  2661  				{
  2662  					name:     "no endpoints",
  2663  					sourceIP: "10.0.0.2",
  2664  					destIP:   "172.30.1.1",
  2665  					destPort: 80,
  2666  					output:   "DROP",
  2667  				},
  2668  			},
  2669  		},
  2670  	}
  2671  
  2672  	for _, tc := range testCases {
  2673  		t.Run(tc.name, func(t *testing.T) {
  2674  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  2675  			fp.OnServiceSynced()
  2676  			fp.OnEndpointSlicesSynced()
  2677  
  2678  			serviceName := "svc1"
  2679  			namespaceName := "ns1"
  2680  
  2681  			svc := &v1.Service{
  2682  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  2683  				Spec: v1.ServiceSpec{
  2684  					ClusterIP: "172.30.1.1",
  2685  					Selector:  map[string]string{"foo": "bar"},
  2686  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
  2687  				},
  2688  			}
  2689  			if tc.internalTrafficPolicy != nil {
  2690  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  2691  			}
  2692  
  2693  			fp.OnServiceAdd(svc)
  2694  
  2695  			endpointSlice := &discovery.EndpointSlice{
  2696  				ObjectMeta: metav1.ObjectMeta{
  2697  					Name:      fmt.Sprintf("%s-1", serviceName),
  2698  					Namespace: namespaceName,
  2699  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  2700  				},
  2701  				Ports: []discovery.EndpointPort{{
  2702  					Name:     ptr.To(""),
  2703  					Port:     ptr.To[int32](80),
  2704  					Protocol: ptr.To(v1.ProtocolTCP),
  2705  				}},
  2706  				AddressType: discovery.AddressTypeIPv4,
  2707  			}
  2708  			for _, ep := range tc.endpoints {
  2709  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  2710  					Addresses:  []string{ep.ip},
  2711  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2712  					NodeName:   ptr.To(ep.hostname),
  2713  				})
  2714  			}
  2715  
  2716  			fp.OnEndpointSliceAdd(endpointSlice)
  2717  			fp.syncProxyRules()
  2718  			runPacketFlowTests(t, tc.line, nft, testNodeIPs, tc.flowTests)
  2719  
  2720  			fp.OnEndpointSliceDelete(endpointSlice)
  2721  			fp.syncProxyRules()
  2722  			runPacketFlowTests(t, tc.line, nft, testNodeIPs, []packetFlowTest{
  2723  				{
  2724  					name:     "endpoints deleted",
  2725  					sourceIP: "10.0.0.2",
  2726  					destIP:   "172.30.1.1",
  2727  					destPort: 80,
  2728  					output:   "REJECT",
  2729  				},
  2730  			})
  2731  		})
  2732  	}
  2733  }
  2734  
  2735  // TestTerminatingEndpointsTrafficPolicyLocal tests that when there are both ready and
  2736  // serving-terminating local endpoints, only the ready endpoints are used.
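        // (When no ready local endpoints exist, external traffic is expected to fall back to the
        // local serving-terminating endpoints; see the "only terminating endpoints exist" case below.)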
  2737  func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
  2738  	service := &v1.Service{
  2739  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  2740  		Spec: v1.ServiceSpec{
  2741  			ClusterIP:             "172.30.1.1",
  2742  			Type:                  v1.ServiceTypeLoadBalancer,
  2743  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  2744  			Ports: []v1.ServicePort{
  2745  				{
  2746  					Name:       "",
  2747  					TargetPort: intstr.FromInt32(80),
  2748  					Port:       80,
  2749  					Protocol:   v1.ProtocolTCP,
  2750  				},
  2751  			},
  2752  			HealthCheckNodePort: 30000,
  2753  		},
  2754  		Status: v1.ServiceStatus{
  2755  			LoadBalancer: v1.LoadBalancerStatus{
  2756  				Ingress: []v1.LoadBalancerIngress{
  2757  					{IP: "1.2.3.4"},
  2758  				},
  2759  			},
  2760  		},
  2761  	}
  2762  
  2763  	testcases := []struct {
  2764  		name          string
  2765  		line          string
  2766  		endpointslice *discovery.EndpointSlice
  2767  		flowTests     []packetFlowTest
  2768  	}{
  2769  		{
  2770  			name: "ready endpoints exist",
  2771  			line: getLine(),
  2772  			endpointslice: &discovery.EndpointSlice{
  2773  				ObjectMeta: metav1.ObjectMeta{
  2774  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2775  					Namespace: "ns1",
  2776  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2777  				},
  2778  				Ports: []discovery.EndpointPort{{
  2779  					Name:     ptr.To(""),
  2780  					Port:     ptr.To[int32](80),
  2781  					Protocol: ptr.To(v1.ProtocolTCP),
  2782  				}},
  2783  				AddressType: discovery.AddressTypeIPv4,
  2784  				Endpoints: []discovery.Endpoint{
  2785  					{
  2786  						Addresses: []string{"10.0.1.1"},
  2787  						Conditions: discovery.EndpointConditions{
  2788  							Ready:       ptr.To(true),
  2789  							Serving:     ptr.To(true),
  2790  							Terminating: ptr.To(false),
  2791  						},
  2792  						NodeName: ptr.To(testHostname),
  2793  					},
  2794  					{
  2795  						Addresses: []string{"10.0.1.2"},
  2796  						Conditions: discovery.EndpointConditions{
  2797  							Ready:       ptr.To(true),
  2798  							Serving:     ptr.To(true),
  2799  							Terminating: ptr.To(false),
  2800  						},
  2801  						NodeName: ptr.To(testHostname),
  2802  					},
  2803  					{
  2804  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  2805  						Addresses: []string{"10.0.1.3"},
  2806  						Conditions: discovery.EndpointConditions{
  2807  							Ready:       ptr.To(false),
  2808  							Serving:     ptr.To(true),
  2809  							Terminating: ptr.To(true),
  2810  						},
  2811  						NodeName: ptr.To(testHostname),
  2812  					},
  2813  					{
  2814  						// this endpoint should be ignored everywhere since it is neither ready nor serving
  2815  						Addresses: []string{"10.0.1.4"},
  2816  						Conditions: discovery.EndpointConditions{
  2817  							Ready:       ptr.To(false),
  2818  							Serving:     ptr.To(false),
  2819  							Terminating: ptr.To(true),
  2820  						},
  2821  						NodeName: ptr.To(testHostname),
  2822  					},
  2823  					{
  2824  						// this endpoint should be ignored for external since it's not local
  2825  						Addresses: []string{"10.0.1.5"},
  2826  						Conditions: discovery.EndpointConditions{
  2827  							Ready:       ptr.To(true),
  2828  							Serving:     ptr.To(true),
  2829  							Terminating: ptr.To(false),
  2830  						},
  2831  						NodeName: ptr.To("host-1"),
  2832  					},
  2833  				},
  2834  			},
  2835  			flowTests: []packetFlowTest{
  2836  				{
  2837  					name:     "pod to clusterIP",
  2838  					sourceIP: "10.0.0.2",
  2839  					destIP:   "172.30.1.1",
  2840  					destPort: 80,
  2841  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  2842  					masq:     false,
  2843  				},
  2844  				{
  2845  					name:     "external to LB",
  2846  					sourceIP: testExternalClient,
  2847  					destIP:   "1.2.3.4",
  2848  					destPort: 80,
  2849  					output:   "10.0.1.1:80, 10.0.1.2:80",
  2850  					masq:     false,
  2851  				},
  2852  			},
  2853  		},
  2854  		{
  2855  			name: "only terminating endpoints exist",
  2856  			line: getLine(),
  2857  			endpointslice: &discovery.EndpointSlice{
  2858  				ObjectMeta: metav1.ObjectMeta{
  2859  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2860  					Namespace: "ns1",
  2861  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2862  				},
  2863  				Ports: []discovery.EndpointPort{{
  2864  					Name:     ptr.To(""),
  2865  					Port:     ptr.To[int32](80),
  2866  					Protocol: ptr.To(v1.ProtocolTCP),
  2867  				}},
  2868  				AddressType: discovery.AddressTypeIPv4,
  2869  				Endpoints: []discovery.Endpoint{
  2870  					{
  2871  						// this endpoint should be used since there are only ready terminating endpoints
  2872  						Addresses: []string{"10.0.1.2"},
  2873  						Conditions: discovery.EndpointConditions{
  2874  							Ready:       ptr.To(false),
  2875  							Serving:     ptr.To(true),
  2876  							Terminating: ptr.To(true),
  2877  						},
  2878  						NodeName: ptr.To(testHostname),
  2879  					},
  2880  					{
  2881  						// this endpoint should be used since there are only ready terminating endpoints
  2882  						Addresses: []string{"10.0.1.3"},
  2883  						Conditions: discovery.EndpointConditions{
  2884  							Ready:       ptr.To(false),
  2885  							Serving:     ptr.To(true),
  2886  							Terminating: ptr.To(true),
  2887  						},
  2888  						NodeName: ptr.To(testHostname),
  2889  					},
  2890  					{
  2891  						// this endpoint should not be used since it is terminating and not serving.
  2892  						Addresses: []string{"10.0.1.4"},
  2893  						Conditions: discovery.EndpointConditions{
  2894  							Ready:       ptr.To(false),
  2895  							Serving:     ptr.To(false),
  2896  							Terminating: ptr.To(true),
  2897  						},
  2898  						NodeName: ptr.To(testHostname),
  2899  					},
  2900  					{
  2901  						// this endpoint should be ignored for external since it's not local
  2902  						Addresses: []string{"10.0.1.5"},
  2903  						Conditions: discovery.EndpointConditions{
  2904  							Ready:       ptr.To(true),
  2905  							Serving:     ptr.To(true),
  2906  							Terminating: ptr.To(false),
  2907  						},
  2908  						NodeName: ptr.To("host-1"),
  2909  					},
  2910  				},
  2911  			},
  2912  			flowTests: []packetFlowTest{
  2913  				{
  2914  					name:     "pod to clusterIP",
  2915  					sourceIP: "10.0.0.2",
  2916  					destIP:   "172.30.1.1",
  2917  					destPort: 80,
  2918  					output:   "10.0.1.5:80",
  2919  					masq:     false,
  2920  				},
  2921  				{
  2922  					name:     "external to LB",
  2923  					sourceIP: testExternalClient,
  2924  					destIP:   "1.2.3.4",
  2925  					destPort: 80,
  2926  					output:   "10.0.1.2:80, 10.0.1.3:80",
  2927  					masq:     false,
  2928  				},
  2929  			},
  2930  		},
  2931  		{
  2932  			name: "terminating endpoints on remote node",
  2933  			line: getLine(),
  2934  			endpointslice: &discovery.EndpointSlice{
  2935  				ObjectMeta: metav1.ObjectMeta{
  2936  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2937  					Namespace: "ns1",
  2938  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2939  				},
  2940  				Ports: []discovery.EndpointPort{{
  2941  					Name:     ptr.To(""),
  2942  					Port:     ptr.To[int32](80),
  2943  					Protocol: ptr.To(v1.ProtocolTCP),
  2944  				}},
  2945  				AddressType: discovery.AddressTypeIPv4,
  2946  				Endpoints: []discovery.Endpoint{
  2947  					{
  2948  						// this endpoint won't be used for external traffic because it's not
  2949  						// local, but it will prevent a REJECT rule from being created
  2950  						Addresses: []string{"10.0.1.5"},
  2951  						Conditions: discovery.EndpointConditions{
  2952  							Ready:       ptr.To(false),
  2953  							Serving:     ptr.To(true),
  2954  							Terminating: ptr.To(true),
  2955  						},
  2956  						NodeName: ptr.To("host-1"),
  2957  					},
  2958  				},
  2959  			},
  2960  			flowTests: []packetFlowTest{
  2961  				{
  2962  					name:     "pod to clusterIP",
  2963  					sourceIP: "10.0.0.2",
  2964  					destIP:   "172.30.1.1",
  2965  					destPort: 80,
  2966  					output:   "10.0.1.5:80",
  2967  				},
  2968  				{
  2969  					name:     "external to LB, no locally-usable endpoints",
  2970  					sourceIP: testExternalClient,
  2971  					destIP:   "1.2.3.4",
  2972  					destPort: 80,
  2973  					output:   "DROP",
  2974  				},
  2975  			},
  2976  		},
  2977  		{
  2978  			name: "no usable endpoints on any node",
  2979  			line: getLine(),
  2980  			endpointslice: &discovery.EndpointSlice{
  2981  				ObjectMeta: metav1.ObjectMeta{
  2982  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2983  					Namespace: "ns1",
  2984  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2985  				},
  2986  				Ports: []discovery.EndpointPort{{
  2987  					Name:     ptr.To(""),
  2988  					Port:     ptr.To[int32](80),
  2989  					Protocol: ptr.To(v1.ProtocolTCP),
  2990  				}},
  2991  				AddressType: discovery.AddressTypeIPv4,
  2992  				Endpoints: []discovery.Endpoint{
  2993  					{
  2994  						// Local but not ready or serving
  2995  						Addresses: []string{"10.0.1.5"},
  2996  						Conditions: discovery.EndpointConditions{
  2997  							Ready:       ptr.To(false),
  2998  							Serving:     ptr.To(false),
  2999  							Terminating: ptr.To(true),
  3000  						},
  3001  						NodeName: ptr.To(testHostname),
  3002  					},
  3003  					{
  3004  						// Remote and not ready or serving
  3005  						Addresses: []string{"10.0.1.5"},
  3006  						Conditions: discovery.EndpointConditions{
  3007  							Ready:       ptr.To(false),
  3008  							Serving:     ptr.To(false),
  3009  							Terminating: ptr.To(true),
  3010  						},
  3011  						NodeName: ptr.To("host-1"),
  3012  					},
  3013  				},
  3014  			},
  3015  			flowTests: []packetFlowTest{
  3016  				{
  3017  					name:     "pod to clusterIP, no usable endpoints",
  3018  					sourceIP: "10.0.0.2",
  3019  					destIP:   "172.30.1.1",
  3020  					destPort: 80,
  3021  					output:   "REJECT",
  3022  				},
  3023  				{
  3024  					name:     "external to LB, no usable endpoints",
  3025  					sourceIP: testExternalClient,
  3026  					destIP:   "1.2.3.4",
  3027  					destPort: 80,
  3028  					output:   "REJECT",
  3029  				},
  3030  			},
  3031  		},
  3032  	}
  3033  
  3034  	for _, testcase := range testcases {
  3035  		t.Run(testcase.name, func(t *testing.T) {
  3036  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3037  			fp.OnServiceSynced()
  3038  			fp.OnEndpointSlicesSynced()
  3039  
  3040  			fp.OnServiceAdd(service)
  3041  
  3042  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  3043  			fp.syncProxyRules()
  3044  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, testcase.flowTests)
  3045  
  3046  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  3047  			fp.syncProxyRules()
  3048  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, []packetFlowTest{
  3049  				{
  3050  					name:     "pod to clusterIP after endpoints deleted",
  3051  					sourceIP: "10.0.0.2",
  3052  					destIP:   "172.30.1.1",
  3053  					destPort: 80,
  3054  					output:   "REJECT",
  3055  				},
  3056  				{
  3057  					name:     "external to LB after endpoints deleted",
  3058  					sourceIP: testExternalClient,
  3059  					destIP:   "1.2.3.4",
  3060  					destPort: 80,
  3061  					output:   "REJECT",
  3062  				},
  3063  			})
  3064  		})
  3065  	}
  3066  }
  3067  
  3068  // TestTerminatingEndpointsTrafficPolicyCluster tests that when there are both ready and
  3069  // serving-terminating endpoints anywhere in the cluster, only the ready endpoints are used.
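        // (When no ready endpoints exist anywhere, traffic is expected to fall back to the
        // serving-terminating endpoints cluster-wide; see the "only terminating endpoints exist"
        // case below.)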
  3070  func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
  3071  	service := &v1.Service{
  3072  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  3073  		Spec: v1.ServiceSpec{
  3074  			ClusterIP:             "172.30.1.1",
  3075  			Type:                  v1.ServiceTypeLoadBalancer,
  3076  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster,
  3077  			Ports: []v1.ServicePort{
  3078  				{
  3079  					Name:       "",
  3080  					TargetPort: intstr.FromInt32(80),
  3081  					Port:       80,
  3082  					Protocol:   v1.ProtocolTCP,
  3083  				},
  3084  			},
  3085  			HealthCheckNodePort: 30000,
  3086  		},
  3087  		Status: v1.ServiceStatus{
  3088  			LoadBalancer: v1.LoadBalancerStatus{
  3089  				Ingress: []v1.LoadBalancerIngress{
  3090  					{IP: "1.2.3.4"},
  3091  				},
  3092  			},
  3093  		},
  3094  	}
  3095  
  3096  	testcases := []struct {
  3097  		name          string
  3098  		line          string
  3099  		endpointslice *discovery.EndpointSlice
  3100  		flowTests     []packetFlowTest
  3101  	}{
  3102  		{
  3103  			name: "ready endpoints exist",
  3104  			line: getLine(),
  3105  			endpointslice: &discovery.EndpointSlice{
  3106  				ObjectMeta: metav1.ObjectMeta{
  3107  					Name:      fmt.Sprintf("%s-1", "svc1"),
  3108  					Namespace: "ns1",
  3109  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  3110  				},
  3111  				Ports: []discovery.EndpointPort{{
  3112  					Name:     ptr.To(""),
  3113  					Port:     ptr.To[int32](80),
  3114  					Protocol: ptr.To(v1.ProtocolTCP),
  3115  				}},
  3116  				AddressType: discovery.AddressTypeIPv4,
  3117  				Endpoints: []discovery.Endpoint{
  3118  					{
  3119  						Addresses: []string{"10.0.1.1"},
  3120  						Conditions: discovery.EndpointConditions{
  3121  							Ready:       ptr.To(true),
  3122  							Serving:     ptr.To(true),
  3123  							Terminating: ptr.To(false),
  3124  						},
  3125  						NodeName: ptr.To(testHostname),
  3126  					},
  3127  					{
  3128  						Addresses: []string{"10.0.1.2"},
  3129  						Conditions: discovery.EndpointConditions{
  3130  							Ready:       ptr.To(true),
  3131  							Serving:     ptr.To(true),
  3132  							Terminating: ptr.To(false),
  3133  						},
  3134  						NodeName: ptr.To(testHostname),
  3135  					},
  3136  					{
  3137  						// this endpoint should be ignored since there are ready non-terminating endpoints
  3138  						Addresses: []string{"10.0.1.3"},
  3139  						Conditions: discovery.EndpointConditions{
  3140  							Ready:       ptr.To(false),
  3141  							Serving:     ptr.To(true),
  3142  							Terminating: ptr.To(true),
  3143  						},
  3144  						NodeName: ptr.To("another-host"),
  3145  					},
  3146  					{
  3147  						// this endpoint should be ignored since it is not "serving"
  3148  						Addresses: []string{"10.0.1.4"},
  3149  						Conditions: discovery.EndpointConditions{
  3150  							Ready:       ptr.To(false),
  3151  							Serving:     ptr.To(false),
  3152  							Terminating: ptr.To(true),
  3153  						},
  3154  						NodeName: ptr.To("another-host"),
  3155  					},
  3156  					{
  3157  						Addresses: []string{"10.0.1.5"},
  3158  						Conditions: discovery.EndpointConditions{
  3159  							Ready:       ptr.To(true),
  3160  							Serving:     ptr.To(true),
  3161  							Terminating: ptr.To(false),
  3162  						},
  3163  						NodeName: ptr.To("another-host"),
  3164  					},
  3165  				},
  3166  			},
  3167  			flowTests: []packetFlowTest{
  3168  				{
  3169  					name:     "pod to clusterIP",
  3170  					sourceIP: "10.0.0.2",
  3171  					destIP:   "172.30.1.1",
  3172  					destPort: 80,
  3173  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  3174  					masq:     false,
  3175  				},
  3176  				{
  3177  					name:     "external to LB",
  3178  					sourceIP: testExternalClient,
  3179  					destIP:   "1.2.3.4",
  3180  					destPort: 80,
  3181  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  3182  					masq:     true,
  3183  				},
  3184  			},
  3185  		},
  3186  		{
  3187  			name: "only terminating endpoints exist",
  3188  			line: getLine(),
  3189  			endpointslice: &discovery.EndpointSlice{
  3190  				ObjectMeta: metav1.ObjectMeta{
  3191  					Name:      fmt.Sprintf("%s-1", "svc1"),
  3192  					Namespace: "ns1",
  3193  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  3194  				},
  3195  				Ports: []discovery.EndpointPort{{
  3196  					Name:     ptr.To(""),
  3197  					Port:     ptr.To[int32](80),
  3198  					Protocol: ptr.To(v1.ProtocolTCP),
  3199  				}},
  3200  				AddressType: discovery.AddressTypeIPv4,
  3201  				Endpoints: []discovery.Endpoint{
  3202  					{
  3203  						// this endpoint should be used since there are only ready terminating endpoints
  3204  						Addresses: []string{"10.0.1.2"},
  3205  						Conditions: discovery.EndpointConditions{
  3206  							Ready:       ptr.To(false),
  3207  							Serving:     ptr.To(true),
  3208  							Terminating: ptr.To(true),
  3209  						},
  3210  						NodeName: ptr.To(testHostname),
  3211  					},
  3212  					{
  3213  						// this endpoint should be used since there are only ready terminating endpoints
  3214  						Addresses: []string{"10.0.1.3"},
  3215  						Conditions: discovery.EndpointConditions{
  3216  							Ready:       ptr.To(false),
  3217  							Serving:     ptr.To(true),
  3218  							Terminating: ptr.To(true),
  3219  						},
  3220  						NodeName: ptr.To(testHostname),
  3221  					},
  3222  					{
  3223  						// this endpoint should not be used since it is terminating and not serving.
  3224  						Addresses: []string{"10.0.1.4"},
  3225  						Conditions: discovery.EndpointConditions{
  3226  							Ready:       ptr.To(false),
  3227  							Serving:     ptr.To(false),
  3228  							Terminating: ptr.To(true),
  3229  						},
  3230  						NodeName: ptr.To("another-host"),
  3231  					},
  3232  					{
  3233  						// this endpoint should be used since there are only ready terminating endpoints
  3234  						Addresses: []string{"10.0.1.5"},
  3235  						Conditions: discovery.EndpointConditions{
  3236  							Ready:       ptr.To(false),
  3237  							Serving:     ptr.To(true),
  3238  							Terminating: ptr.To(true),
  3239  						},
  3240  						NodeName: ptr.To("another-host"),
  3241  					},
  3242  				},
  3243  			},
  3244  			flowTests: []packetFlowTest{
  3245  				{
  3246  					name:     "pod to clusterIP",
  3247  					sourceIP: "10.0.0.2",
  3248  					destIP:   "172.30.1.1",
  3249  					destPort: 80,
  3250  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  3251  					masq:     false,
  3252  				},
  3253  				{
  3254  					name:     "external to LB",
  3255  					sourceIP: testExternalClient,
  3256  					destIP:   "1.2.3.4",
  3257  					destPort: 80,
  3258  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  3259  					masq:     true,
  3260  				},
  3261  			},
  3262  		},
  3263  		{
  3264  			name: "terminating endpoints on remote node",
  3265  			line: getLine(),
  3266  			endpointslice: &discovery.EndpointSlice{
  3267  				ObjectMeta: metav1.ObjectMeta{
  3268  					Name:      fmt.Sprintf("%s-1", "svc1"),
  3269  					Namespace: "ns1",
  3270  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  3271  				},
  3272  				Ports: []discovery.EndpointPort{{
  3273  					Name:     ptr.To(""),
  3274  					Port:     ptr.To[int32](80),
  3275  					Protocol: ptr.To(v1.ProtocolTCP),
  3276  				}},
  3277  				AddressType: discovery.AddressTypeIPv4,
  3278  				Endpoints: []discovery.Endpoint{
  3279  					{
  3280  						Addresses: []string{"10.0.1.5"},
  3281  						Conditions: discovery.EndpointConditions{
  3282  							Ready:       ptr.To(false),
  3283  							Serving:     ptr.To(true),
  3284  							Terminating: ptr.To(true),
  3285  						},
  3286  						NodeName: ptr.To("host-1"),
  3287  					},
  3288  				},
  3289  			},
  3290  			flowTests: []packetFlowTest{
  3291  				{
  3292  					name:     "pod to clusterIP",
  3293  					sourceIP: "10.0.0.2",
  3294  					destIP:   "172.30.1.1",
  3295  					destPort: 80,
  3296  					output:   "10.0.1.5:80",
  3297  					masq:     false,
  3298  				},
  3299  				{
  3300  					name:     "external to LB",
  3301  					sourceIP: testExternalClient,
  3302  					destIP:   "1.2.3.4",
  3303  					destPort: 80,
  3304  					output:   "10.0.1.5:80",
  3305  					masq:     true,
  3306  				},
  3307  			},
  3308  		},
  3309  		{
  3310  			name: "no usable endpoints on any node",
  3311  			line: getLine(),
  3312  			endpointslice: &discovery.EndpointSlice{
  3313  				ObjectMeta: metav1.ObjectMeta{
  3314  					Name:      fmt.Sprintf("%s-1", "svc1"),
  3315  					Namespace: "ns1",
  3316  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  3317  				},
  3318  				Ports: []discovery.EndpointPort{{
  3319  					Name:     ptr.To(""),
  3320  					Port:     ptr.To[int32](80),
  3321  					Protocol: ptr.To(v1.ProtocolTCP),
  3322  				}},
  3323  				AddressType: discovery.AddressTypeIPv4,
  3324  				Endpoints: []discovery.Endpoint{
  3325  					{
  3326  						// Local, not ready or serving
  3327  						Addresses: []string{"10.0.1.5"},
  3328  						Conditions: discovery.EndpointConditions{
  3329  							Ready:       ptr.To(false),
  3330  							Serving:     ptr.To(false),
  3331  							Terminating: ptr.To(true),
  3332  						},
  3333  						NodeName: ptr.To(testHostname),
  3334  					},
  3335  					{
  3336  						// Remote, not ready or serving
  3337  						Addresses: []string{"10.0.1.5"},
  3338  						Conditions: discovery.EndpointConditions{
  3339  							Ready:       ptr.To(false),
  3340  							Serving:     ptr.To(false),
  3341  							Terminating: ptr.To(true),
  3342  						},
  3343  						NodeName: ptr.To("host-1"),
  3344  					},
  3345  				},
  3346  			},
  3347  			flowTests: []packetFlowTest{
  3348  				{
  3349  					name:     "pod to clusterIP",
  3350  					sourceIP: "10.0.0.2",
  3351  					destIP:   "172.30.1.1",
  3352  					destPort: 80,
  3353  					output:   "REJECT",
  3354  				},
  3355  				{
  3356  					name:     "external to LB",
  3357  					sourceIP: testExternalClient,
  3358  					destIP:   "1.2.3.4",
  3359  					destPort: 80,
  3360  					output:   "REJECT",
  3361  				},
  3362  			},
  3363  		},
  3364  	}
  3365  
  3366  	for _, testcase := range testcases {
  3367  		t.Run(testcase.name, func(t *testing.T) {
  3368  
  3369  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3370  			fp.OnServiceSynced()
  3371  			fp.OnEndpointSlicesSynced()
  3372  
  3373  			fp.OnServiceAdd(service)
  3374  
  3375  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  3376  			fp.syncProxyRules()
  3377  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, testcase.flowTests)
  3378  
  3379  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  3380  			fp.syncProxyRules()
  3381  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, []packetFlowTest{
  3382  				{
  3383  					name:     "pod to clusterIP after endpoints deleted",
  3384  					sourceIP: "10.0.0.2",
  3385  					destIP:   "172.30.1.1",
  3386  					destPort: 80,
  3387  					output:   "REJECT",
  3388  				},
  3389  				{
  3390  					name:     "external to LB after endpoints deleted",
  3391  					sourceIP: testExternalClient,
  3392  					destIP:   "1.2.3.4",
  3393  					destPort: 80,
  3394  					output:   "REJECT",
  3395  				},
  3396  			})
  3397  		})
  3398  	}
  3399  }
  3400  
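        // TestInternalExternalMasquerade verifies which endpoints are selected, and whether traffic
        // is masqueraded, for each combination of traffic source (pod, node, external), destination
        // (ClusterIP, NodePort, LoadBalancer), and traffic policy, under several proxier configurations.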
  3401  func TestInternalExternalMasquerade(t *testing.T) {
  3402  	// (Put the test setup code in an internal function so we can have it here at the
  3403  	// top, before the test cases that will be run against it.)
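        	// setupTest installs three LoadBalancer Services, each backed by one local and one remote
        	// endpoint: svc1 uses the default traffic policies, svc2 sets externalTrafficPolicy: Local,
        	// and svc3 sets internalTrafficPolicy: Local.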
  3404  	setupTest := func(fp *Proxier) {
  3405  		makeServiceMap(fp,
  3406  			makeTestService("ns1", "svc1", func(svc *v1.Service) {
  3407  				svc.Spec.Type = "LoadBalancer"
  3408  				svc.Spec.ClusterIP = "172.30.0.41"
  3409  				svc.Spec.Ports = []v1.ServicePort{{
  3410  					Name:     "p80",
  3411  					Port:     80,
  3412  					Protocol: v1.ProtocolTCP,
  3413  					NodePort: int32(3001),
  3414  				}}
  3415  				svc.Spec.HealthCheckNodePort = 30001
  3416  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3417  					IP: "1.2.3.4",
  3418  				}}
  3419  			}),
  3420  			makeTestService("ns2", "svc2", func(svc *v1.Service) {
  3421  				svc.Spec.Type = "LoadBalancer"
  3422  				svc.Spec.ClusterIP = "172.30.0.42"
  3423  				svc.Spec.Ports = []v1.ServicePort{{
  3424  					Name:     "p80",
  3425  					Port:     80,
  3426  					Protocol: v1.ProtocolTCP,
  3427  					NodePort: int32(3002),
  3428  				}}
  3429  				svc.Spec.HealthCheckNodePort = 30002
  3430  				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3431  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3432  					IP: "5.6.7.8",
  3433  				}}
  3434  			}),
  3435  			makeTestService("ns3", "svc3", func(svc *v1.Service) {
  3436  				svc.Spec.Type = "LoadBalancer"
  3437  				svc.Spec.ClusterIP = "172.30.0.43"
  3438  				svc.Spec.Ports = []v1.ServicePort{{
  3439  					Name:     "p80",
  3440  					Port:     80,
  3441  					Protocol: v1.ProtocolTCP,
  3442  					NodePort: int32(3003),
  3443  				}}
  3444  				svc.Spec.HealthCheckNodePort = 30003
  3445  				svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal)
  3446  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3447  					IP: "9.10.11.12",
  3448  				}}
  3449  			}),
  3450  		)
  3451  
  3452  		populateEndpointSlices(fp,
  3453  			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  3454  				eps.AddressType = discovery.AddressTypeIPv4
  3455  				eps.Endpoints = []discovery.Endpoint{
  3456  					{
  3457  						Addresses: []string{"10.180.0.1"},
  3458  						NodeName:  ptr.To(testHostname),
  3459  					},
  3460  					{
  3461  						Addresses: []string{"10.180.1.1"},
  3462  						NodeName:  ptr.To("remote"),
  3463  					},
  3464  				}
  3465  				eps.Ports = []discovery.EndpointPort{{
  3466  					Name:     ptr.To("p80"),
  3467  					Port:     ptr.To[int32](80),
  3468  					Protocol: ptr.To(v1.ProtocolTCP),
  3469  				}}
  3470  			}),
  3471  			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  3472  				eps.AddressType = discovery.AddressTypeIPv4
  3473  				eps.Endpoints = []discovery.Endpoint{
  3474  					{
  3475  						Addresses: []string{"10.180.0.2"},
  3476  						NodeName:  ptr.To(testHostname),
  3477  					},
  3478  					{
  3479  						Addresses: []string{"10.180.1.2"},
  3480  						NodeName:  ptr.To("remote"),
  3481  					},
  3482  				}
  3483  				eps.Ports = []discovery.EndpointPort{{
  3484  					Name:     ptr.To("p80"),
  3485  					Port:     ptr.To[int32](80),
  3486  					Protocol: ptr.To(v1.ProtocolTCP),
  3487  				}}
  3488  			}),
  3489  			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  3490  				eps.AddressType = discovery.AddressTypeIPv4
  3491  				eps.Endpoints = []discovery.Endpoint{
  3492  					{
  3493  						Addresses: []string{"10.180.0.3"},
  3494  						NodeName:  ptr.To(testHostname),
  3495  					},
  3496  					{
  3497  						Addresses: []string{"10.180.1.3"},
  3498  						NodeName:  ptr.To("remote"),
  3499  					},
  3500  				}
  3501  				eps.Ports = []discovery.EndpointPort{{
  3502  					Name:     ptr.To("p80"),
  3503  					Port:     ptr.To[int32](80),
  3504  					Protocol: ptr.To(v1.ProtocolTCP),
  3505  				}}
  3506  			}),
  3507  		)
  3508  
  3509  		fp.syncProxyRules()
  3510  	}
  3511  
  3512  	// We use the same flowTests for all of the testCases. The "output" and "masq"
  3513  	// values here represent the normal case (working localDetector, no masqueradeAll)
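        	// Cases that a given testCase expects to behave differently are patched via that
        	// testCase's "overrides" map; everything else must match these base values.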
  3514  	flowTests := []packetFlowTest{
  3515  		{
  3516  			name:     "pod to ClusterIP",
  3517  			sourceIP: "10.0.0.2",
  3518  			destIP:   "172.30.0.41",
  3519  			destPort: 80,
  3520  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3521  			masq:     false,
  3522  		},
  3523  		{
  3524  			name:     "pod to NodePort",
  3525  			sourceIP: "10.0.0.2",
  3526  			destIP:   testNodeIP,
  3527  			destPort: 3001,
  3528  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3529  			masq:     true,
  3530  		},
  3531  		{
  3532  			name:     "pod to LB",
  3533  			sourceIP: "10.0.0.2",
  3534  			destIP:   "1.2.3.4",
  3535  			destPort: 80,
  3536  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3537  			masq:     true,
  3538  		},
  3539  		{
  3540  			name:     "node to ClusterIP",
  3541  			sourceIP: testNodeIP,
  3542  			destIP:   "172.30.0.41",
  3543  			destPort: 80,
  3544  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3545  			masq:     true,
  3546  		},
  3547  		{
  3548  			name:     "node to NodePort",
  3549  			sourceIP: testNodeIP,
  3550  			destIP:   testNodeIP,
  3551  			destPort: 3001,
  3552  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3553  			masq:     true,
  3554  		},
  3555  		{
  3556  			name:     "node to LB",
  3557  			sourceIP: testNodeIP,
  3558  			destIP:   "1.2.3.4",
  3559  			destPort: 80,
  3560  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3561  			masq:     true,
  3562  		},
  3563  		{
  3564  			name:     "external to ClusterIP",
  3565  			sourceIP: testExternalClient,
  3566  			destIP:   "172.30.0.41",
  3567  			destPort: 80,
  3568  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3569  			masq:     true,
  3570  		},
  3571  		{
  3572  			name:     "external to NodePort",
  3573  			sourceIP: testExternalClient,
  3574  			destIP:   testNodeIP,
  3575  			destPort: 3001,
  3576  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3577  			masq:     true,
  3578  		},
  3579  		{
  3580  			name:     "external to LB",
  3581  			sourceIP: testExternalClient,
  3582  			destIP:   "1.2.3.4",
  3583  			destPort: 80,
  3584  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3585  			masq:     true,
  3586  		},
  3587  		{
  3588  			name:     "pod to ClusterIP with eTP:Local",
  3589  			sourceIP: "10.0.0.2",
  3590  			destIP:   "172.30.0.42",
  3591  			destPort: 80,
  3592  
  3593  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3594  			// as "Pod to ClusterIP"
  3595  			output: "10.180.0.2:80, 10.180.1.2:80",
  3596  			masq:   false,
  3597  		},
  3598  		{
  3599  			name:     "pod to NodePort with eTP:Local",
  3600  			sourceIP: "10.0.0.2",
  3601  			destIP:   testNodeIP,
  3602  			destPort: 3002,
  3603  
  3604  			// See the comment below in the "pod to LB with eTP:Local" case.
  3605  			// It doesn't actually make sense to short-circuit here, since if
  3606  			// you connect directly to a NodePort from outside the cluster,
  3607  			// you only get the local endpoints. But it's simpler for us and
  3608  			// slightly more convenient for users to have this case get
  3609  			// short-circuited too.
  3610  			output: "10.180.0.2:80, 10.180.1.2:80",
  3611  			masq:   false,
  3612  		},
  3613  		{
  3614  			name:     "pod to LB with eTP:Local",
  3615  			sourceIP: "10.0.0.2",
  3616  			destIP:   "5.6.7.8",
  3617  			destPort: 80,
  3618  
  3619  			// The short-circuit rule is supposed to make this behave the same
  3620  			// way it would if the packet actually went out to the LB and then
  3621  			// came back into the cluster. So it gets routed to all endpoints,
  3622  			// not just local ones. In reality, if the packet actually left
  3623  			// the cluster, it would have to get masqueraded, but since we can
  3624  			// avoid doing that in the short-circuit case, and not masquerading
  3625  			// is more useful, we avoid masquerading.
  3626  			output: "10.180.0.2:80, 10.180.1.2:80",
  3627  			masq:   false,
  3628  		},
  3629  		{
  3630  			name:     "node to ClusterIP with eTP:Local",
  3631  			sourceIP: testNodeIP,
  3632  			destIP:   "172.30.0.42",
  3633  			destPort: 80,
  3634  
  3635  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3636  			// as "node to ClusterIP"
  3637  			output: "10.180.0.2:80, 10.180.1.2:80",
  3638  			masq:   true,
  3639  		},
  3640  		{
  3641  			name:     "node to NodePort with eTP:Local",
  3642  			sourceIP: testNodeIP,
  3643  			destIP:   testNodeIP,
  3644  			destPort: 3001,
  3645  
  3646  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  3647  			// same as "node to NodePort" above.
  3648  			output: "10.180.0.1:80, 10.180.1.1:80",
  3649  			masq:   true,
  3650  		},
  3651  		{
  3652  			name:     "node to LB with eTP:Local",
  3653  			sourceIP: testNodeIP,
  3654  			destIP:   "5.6.7.8",
  3655  			destPort: 80,
  3656  
  3657  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  3658  			// same as "node to LB" above.
  3659  			output: "10.180.0.2:80, 10.180.1.2:80",
  3660  			masq:   true,
  3661  		},
  3662  		{
  3663  			name:     "external to ClusterIP with eTP:Local",
  3664  			sourceIP: testExternalClient,
  3665  			destIP:   "172.30.0.42",
  3666  			destPort: 80,
  3667  
  3668  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3669  			// as "external to ClusterIP" above.
  3670  			output: "10.180.0.2:80, 10.180.1.2:80",
  3671  			masq:   true,
  3672  		},
  3673  		{
  3674  			name:     "external to NodePort with eTP:Local",
  3675  			sourceIP: testExternalClient,
  3676  			destIP:   testNodeIP,
  3677  			destPort: 3002,
  3678  
  3679  			// externalTrafficPolicy applies; only the local endpoint is
  3680  			// selected, and we don't masquerade.
  3681  			output: "10.180.0.2:80",
  3682  			masq:   false,
  3683  		},
  3684  		{
  3685  			name:     "external to LB with eTP:Local",
  3686  			sourceIP: testExternalClient,
  3687  			destIP:   "5.6.7.8",
  3688  			destPort: 80,
  3689  
  3690  			// externalTrafficPolicy applies; only the local endpoint is
  3691  			// selected, and we don't masquerade.
  3692  			output: "10.180.0.2:80",
  3693  			masq:   false,
  3694  		},
  3695  		{
  3696  			name:     "pod to ClusterIP with iTP:Local",
  3697  			sourceIP: "10.0.0.2",
  3698  			destIP:   "172.30.0.43",
  3699  			destPort: 80,
  3700  
  3701  			// internalTrafficPolicy applies; only the local endpoint is
  3702  			// selected.
  3703  			output: "10.180.0.3:80",
  3704  			masq:   false,
  3705  		},
  3706  		{
  3707  			name:     "pod to NodePort with iTP:Local",
  3708  			sourceIP: "10.0.0.2",
  3709  			destIP:   testNodeIP,
  3710  			destPort: 3003,
  3711  
  3712  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3713  			// "pod to NodePort" above.
  3714  			output: "10.180.0.3:80, 10.180.1.3:80",
  3715  			masq:   true,
  3716  		},
  3717  		{
  3718  			name:     "pod to LB with iTP:Local",
  3719  			sourceIP: "10.0.0.2",
  3720  			destIP:   "9.10.11.12",
  3721  			destPort: 80,
  3722  
  3723  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3724  			// same as "pod to LB" above.
  3725  			output: "10.180.0.3:80, 10.180.1.3:80",
  3726  			masq:   true,
  3727  		},
  3728  		{
  3729  			name:     "node to ClusterIP with iTP:Local",
  3730  			sourceIP: testNodeIP,
  3731  			destIP:   "172.30.0.43",
  3732  			destPort: 80,
  3733  
  3734  			// internalTrafficPolicy applies; only the local endpoint is selected.
  3735  			// Traffic is masqueraded as in the "node to ClusterIP" case because
  3736  			// internalTrafficPolicy does not affect masquerading.
  3737  			output: "10.180.0.3:80",
  3738  			masq:   true,
  3739  		},
  3740  		{
  3741  			name:     "node to NodePort with iTP:Local",
  3742  			sourceIP: testNodeIP,
  3743  			destIP:   testNodeIP,
  3744  			destPort: 3003,
  3745  
  3746  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3747  			// "node to NodePort" above.
  3748  			output: "10.180.0.3:80, 10.180.1.3:80",
  3749  			masq:   true,
  3750  		},
  3751  		{
  3752  			name:     "node to LB with iTP:Local",
  3753  			sourceIP: testNodeIP,
  3754  			destIP:   "9.10.11.12",
  3755  			destPort: 80,
  3756  
  3757  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3758  			// same as "node to LB" above.
  3759  			output: "10.180.0.3:80, 10.180.1.3:80",
  3760  			masq:   true,
  3761  		},
  3762  		{
  3763  			name:     "external to ClusterIP with iTP:Local",
  3764  			sourceIP: testExternalClient,
  3765  			destIP:   "172.30.0.43",
  3766  			destPort: 80,
  3767  
  3768  			// internalTrafficPolicy applies; only the local endpoint is selected.
  3769  			// Traffic is masqueraded as in the "external to ClusterIP" case
  3770  			// because internalTrafficPolicy does not affect masquerading.
  3771  			output: "10.180.0.3:80",
  3772  			masq:   true,
  3773  		},
  3774  		{
  3775  			name:     "external to NodePort with iTP:Local",
  3776  			sourceIP: testExternalClient,
  3777  			destIP:   testNodeIP,
  3778  			destPort: 3003,
  3779  
  3780  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3781  			// "external to NodePort" above.
  3782  			output: "10.180.0.3:80, 10.180.1.3:80",
  3783  			masq:   true,
  3784  		},
  3785  		{
  3786  			name:     "external to LB with iTP:Local",
  3787  			sourceIP: testExternalClient,
  3788  			destIP:   "9.10.11.12",
  3789  			destPort: 80,
  3790  
  3791  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3792  			// same as "external to LB" above.
  3793  			output: "10.180.0.3:80, 10.180.1.3:80",
  3794  			masq:   true,
  3795  		},
  3796  	}
  3797  
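        	// packetFlowTestOverride describes how a testCase's expectation differs from the base
        	// flowTests. A nil field means "same as the base"; a non-nil field must actually differ
        	// from the base value, since the merging loop below flags no-op overrides as errors.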
  3798  	type packetFlowTestOverride struct {
  3799  		output *string
  3800  		masq   *bool
  3801  	}
  3802  
  3803  	testCases := []struct {
  3804  		name          string
  3805  		line          string
  3806  		masqueradeAll bool
  3807  		localDetector bool
  3808  		overrides     map[string]packetFlowTestOverride
  3809  	}{
  3810  		{
  3811  			name:          "base",
  3812  			line:          getLine(),
  3813  			masqueradeAll: false,
  3814  			localDetector: true,
  3815  			overrides:     nil,
  3816  		},
  3817  		{
  3818  			name:          "no LocalTrafficDetector",
  3819  			line:          getLine(),
  3820  			masqueradeAll: false,
  3821  			localDetector: false,
  3822  			overrides: map[string]packetFlowTestOverride{
  3823  				// With no LocalTrafficDetector, all traffic to a
  3824  				// ClusterIP is assumed to be from a pod, and thus to not
  3825  				// require masquerading.
  3826  				"node to ClusterIP": {
  3827  					masq: ptr.To(false),
  3828  				},
  3829  				"node to ClusterIP with eTP:Local": {
  3830  					masq: ptr.To(false),
  3831  				},
  3832  				"node to ClusterIP with iTP:Local": {
  3833  					masq: ptr.To(false),
  3834  				},
  3835  				"external to ClusterIP": {
  3836  					masq: ptr.To(false),
  3837  				},
  3838  				"external to ClusterIP with eTP:Local": {
  3839  					masq: ptr.To(false),
  3840  				},
  3841  				"external to ClusterIP with iTP:Local": {
  3842  					masq: ptr.To(false),
  3843  				},
  3844  
  3845  				// And there's no eTP:Local short-circuit for pod traffic,
  3846  				// so pods get only the local endpoints.
  3847  				"pod to NodePort with eTP:Local": {
  3848  					output: ptr.To("10.180.0.2:80"),
  3849  				},
  3850  				"pod to LB with eTP:Local": {
  3851  					output: ptr.To("10.180.0.2:80"),
  3852  				},
  3853  			},
  3854  		},
  3855  		{
  3856  			name:          "masqueradeAll",
  3857  			line:          getLine(),
  3858  			masqueradeAll: true,
  3859  			localDetector: true,
  3860  			overrides: map[string]packetFlowTestOverride{
  3861  				// All "to ClusterIP" traffic gets masqueraded when using
  3862  				// --masquerade-all.
  3863  				"pod to ClusterIP": {
  3864  					masq: ptr.To(true),
  3865  				},
  3866  				"pod to ClusterIP with eTP:Local": {
  3867  					masq: ptr.To(true),
  3868  				},
  3869  				"pod to ClusterIP with iTP:Local": {
  3870  					masq: ptr.To(true),
  3871  				},
  3872  			},
  3873  		},
  3874  		{
  3875  			name:          "masqueradeAll, no LocalTrafficDetector",
  3876  			line:          getLine(),
  3877  			masqueradeAll: true,
  3878  			localDetector: false,
  3879  			overrides: map[string]packetFlowTestOverride{
  3880  				// As in "masqueradeAll"
  3881  				"pod to ClusterIP": {
  3882  					masq: ptr.To(true),
  3883  				},
  3884  				"pod to ClusterIP with eTP:Local": {
  3885  					masq: ptr.To(true),
  3886  				},
  3887  				"pod to ClusterIP with iTP:Local": {
  3888  					masq: ptr.To(true),
  3889  				},
  3890  
  3891  				// As in "no LocalTrafficDetector"
  3892  				"pod to NodePort with eTP:Local": {
  3893  					output: ptr.To("10.180.0.2:80"),
  3894  				},
  3895  				"pod to LB with eTP:Local": {
  3896  					output: ptr.To("10.180.0.2:80"),
  3897  				},
  3898  			},
  3899  		},
  3900  	}
  3901  
  3902  	for _, tc := range testCases {
  3903  		t.Run(tc.name, func(t *testing.T) {
  3904  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3905  			fp.masqueradeAll = tc.masqueradeAll
  3906  			if !tc.localDetector {
  3907  				fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
  3908  			}
  3909  			setupTest(fp)
  3910  
  3911  			// Merge base flowTests with per-test-case overrides
  3912  			tcFlowTests := make([]packetFlowTest, len(flowTests))
  3913  			overridesApplied := 0
  3914  			for i := range flowTests {
  3915  				tcFlowTests[i] = flowTests[i]
  3916  				if overrides, set := tc.overrides[flowTests[i].name]; set {
  3917  					overridesApplied++
  3918  					if overrides.masq != nil {
  3919  						if tcFlowTests[i].masq == *overrides.masq {
  3920  							t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
  3921  						}
  3922  						tcFlowTests[i].masq = *overrides.masq
  3923  					}
  3924  					if overrides.output != nil {
  3925  						if tcFlowTests[i].output == *overrides.output {
  3926  							t.Errorf("%q override value for output is same as base value", flowTests[i].name)
  3927  						}
  3928  						tcFlowTests[i].output = *overrides.output
  3929  					}
  3930  				}
  3931  			}
  3932  			if overridesApplied != len(tc.overrides) {
  3933  				t.Errorf("%d overrides did not match any flow test name!", len(tc.overrides)-overridesApplied)
  3934  			}
  3935  			runPacketFlowTests(t, tc.line, nft, testNodeIPs, tcFlowTests)
  3936  		})
  3937  	}
  3938  }
  3939  
  3940  // Test calling syncProxyRules() multiple times with various changes
  3941  func TestSyncProxyRulesRepeated(t *testing.T) {
  3942  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3943  
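        	// baseRules is the portion of the ruleset that every sync is expected to program,
        	// independent of any particular Service; the expectations below append the
        	// Service-specific elements and chains to it.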
  3944  	baseRules := dedent.Dedent(`
  3945  		add table ip kube-proxy { comment "rules for kube-proxy" ; }
  3946  
  3947  		add chain ip kube-proxy cluster-ips-check
  3948  		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
  3949  		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
  3950  		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
  3951  		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
  3952  		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
  3953  		add chain ip kube-proxy firewall-check
  3954  		add chain ip kube-proxy mark-for-masquerade
  3955  		add chain ip kube-proxy masquerading
  3956  		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
  3957  		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
  3958  		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
  3959  		add chain ip kube-proxy nodeport-endpoints-check
  3960  		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
  3961  		add chain ip kube-proxy services
  3962  		add chain ip kube-proxy service-endpoints-check
  3963  
  3964  		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
  3965  		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
  3966  		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
  3967  		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
  3968  		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
  3969  		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
  3970  		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
  3971  		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
  3972  		add rule ip kube-proxy filter-output ct state new jump firewall-check
  3973  		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
  3974  		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
  3975  		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
  3976  		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
  3977  		add rule ip kube-proxy masquerading mark set mark xor 0x4000
  3978  		add rule ip kube-proxy masquerading masquerade fully-random
  3979  		add rule ip kube-proxy nat-output jump services
  3980  		add rule ip kube-proxy nat-postrouting jump masquerading
  3981  		add rule ip kube-proxy nat-prerouting jump services
  3982  		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
  3983  		add rule ip kube-proxy reject-chain reject
  3984  		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
  3985  		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
  3986  		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
  3987  		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
  3988  		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
  3989  
  3990  		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
  3991  		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
  3992  		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
  3993  		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
  3994  		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
  3995  		`)
  3996  
  3997  	// Helper function to make it look like time has passed (from the point of view of
  3998  	// the stale-chain-deletion code).
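        	// (Backdating each entry by 2 seconds is assumed to exceed the proxier's stale-chain
        	// grace period, so the next sync deletes the flushed chains instead of keeping them.)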
  3999  	ageStaleChains := func() {
  4000  		for chain, t := range fp.staleChains {
  4001  			fp.staleChains[chain] = t.Add(-2 * time.Second)
  4002  		}
  4003  	}
  4004  
  4005  	// Create initial state
  4006  	var svc2 *v1.Service
  4007  
  4008  	makeServiceMap(fp,
  4009  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  4010  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4011  			svc.Spec.ClusterIP = "172.30.0.41"
  4012  			svc.Spec.Ports = []v1.ServicePort{{
  4013  				Name:     "p80",
  4014  				Port:     80,
  4015  				Protocol: v1.ProtocolTCP,
  4016  			}}
  4017  		}),
  4018  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  4019  			svc2 = svc
  4020  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4021  			svc.Spec.ClusterIP = "172.30.0.42"
  4022  			svc.Spec.Ports = []v1.ServicePort{{
  4023  				Name:     "p8080",
  4024  				Port:     8080,
  4025  				Protocol: v1.ProtocolTCP,
  4026  			}}
  4027  		}),
  4028  	)
  4029  
  4030  	populateEndpointSlices(fp,
  4031  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  4032  			eps.AddressType = discovery.AddressTypeIPv4
  4033  			eps.Endpoints = []discovery.Endpoint{{
  4034  				Addresses: []string{"10.0.1.1"},
  4035  			}}
  4036  			eps.Ports = []discovery.EndpointPort{{
  4037  				Name:     ptr.To("p80"),
  4038  				Port:     ptr.To[int32](80),
  4039  				Protocol: ptr.To(v1.ProtocolTCP),
  4040  			}}
  4041  		}),
  4042  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  4043  			eps.AddressType = discovery.AddressTypeIPv4
  4044  			eps.Endpoints = []discovery.Endpoint{{
  4045  				Addresses: []string{"10.0.2.1"},
  4046  			}}
  4047  			eps.Ports = []discovery.EndpointPort{{
  4048  				Name:     ptr.To("p8080"),
  4049  				Port:     ptr.To[int32](8080),
  4050  				Protocol: ptr.To(v1.ProtocolTCP),
  4051  			}}
  4052  		}),
  4053  	)
  4054  
  4055  	fp.syncProxyRules()
  4056  
  4057  	expected := baseRules + dedent.Dedent(`
  4058  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4059  		add element ip kube-proxy cluster-ips { 172.30.0.42 }
  4060  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4061  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4062  		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
  4063  
  4064  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4065  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4066  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4067  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4068  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4069  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4070  
  4071  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4072  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 tcp dport 8080 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4073  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
  4074  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4075  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 jump mark-for-masquerade
  4076  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
  4077  		`)
  4078  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4079  
  4080  	// Add a new service and its endpoints
  4081  	makeServiceMap(fp,
  4082  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  4083  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4084  			svc.Spec.ClusterIP = "172.30.0.43"
  4085  			svc.Spec.Ports = []v1.ServicePort{{
  4086  				Name:     "p80",
  4087  				Port:     80,
  4088  				Protocol: v1.ProtocolTCP,
  4089  			}}
  4090  		}),
  4091  	)
  4092  	var eps3 *discovery.EndpointSlice
  4093  	populateEndpointSlices(fp,
  4094  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  4095  			eps3 = eps
  4096  			eps.AddressType = discovery.AddressTypeIPv4
  4097  			eps.Endpoints = []discovery.Endpoint{{
  4098  				Addresses: []string{"10.0.3.1"},
  4099  			}}
  4100  			eps.Ports = []discovery.EndpointPort{{
  4101  				Name:     ptr.To("p80"),
  4102  				Port:     ptr.To[int32](80),
  4103  				Protocol: ptr.To(v1.ProtocolTCP),
  4104  			}}
  4105  		}),
  4106  	)
  4107  	fp.syncProxyRules()
  4108  
  4109  	expected = baseRules + dedent.Dedent(`
  4110  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4111  		add element ip kube-proxy cluster-ips { 172.30.0.42 }
  4112  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4113  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4114  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4115  		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
  4116  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4117  
  4118  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4119  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4120  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4121  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4122  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4123  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4124  
  4125  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4126  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 tcp dport 8080 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4127  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
  4128  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4129  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 jump mark-for-masquerade
  4130  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
  4131  
  4132  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4133  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4134  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4135  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4136  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4137  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4138  		`)
  4139  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4140  
  4141  	// Delete a service; its chains will be flushed, but not immediately deleted.
  4142  	fp.OnServiceDelete(svc2)
  4143  	fp.syncProxyRules()
  4144  	expected = baseRules + dedent.Dedent(`
  4145  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4146  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4147  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4148  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4149  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4150  
  4151  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4152  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4153  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4154  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4155  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4156  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4157  
  4158  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4159  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4160  
  4161  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4162  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4163  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4164  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4165  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4166  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4167  		`)
  4168  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
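	// The flushed service-MHHHYRWA and endpoint-7RVP4LUQ chains above are now
	// empty; the proxier remembers them (in fp.staleChains, inspected later in
	// this test) so that a later sync can delete them once they have been
	// stale for long enough.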
  4169  
  4170  	// Fake the passage of time and confirm that the stale chains get deleted.
  4171  	ageStaleChains()
  4172  	fp.syncProxyRules()
  4173  	expected = baseRules + dedent.Dedent(`
  4174  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4175  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4176  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4177  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4178  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4179  
  4180  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4181  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4182  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4183  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4184  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4185  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4186  
  4187  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4188  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4189  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4190  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4191  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4192  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4193  		`)
  4194  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
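	// ageStaleChains is a helper closure (declared earlier in this test) that
	// fakes the passage of time from the point of view of the stale-chain
	// bookkeeping. A minimal sketch of the idea, assuming fp.staleChains maps
	// each stale chain name to the time it became stale (an assumption for
	// illustration, not the helper's actual definition):
	//
	//	ageStaleChains := func() {
	//		for chain := range fp.staleChains {
	//			fp.staleChains[chain] = time.Unix(0, 0) // long enough ago to count as expired
	//		}
	//	}
	//
	// so that the following syncProxyRules treats every remembered chain as
	// old enough to delete.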
  4195  
  4196  	// Add a service, sync, then add its endpoints.
  4197  	makeServiceMap(fp,
  4198  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  4199  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4200  			svc.Spec.ClusterIP = "172.30.0.44"
  4201  			svc.Spec.Ports = []v1.ServicePort{{
  4202  				Name:     "p80",
  4203  				Port:     80,
  4204  				Protocol: v1.ProtocolTCP,
  4205  			}}
  4206  		}),
  4207  	)
  4208  	fp.syncProxyRules()
  4209  	expected = baseRules + dedent.Dedent(`
  4210  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4211  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4212  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4213  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4214  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4215  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4216  
  4217  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4218  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4219  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4220  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4221  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4222  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4223  
  4224  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4225  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4226  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4227  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4228  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4229  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4230  
  4231  		add element ip kube-proxy no-endpoint-services { 172.30.0.44 . tcp . 80 comment "ns4/svc4:p80" : goto reject-chain }
  4232  		`)
  4233  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
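	// Because svc4 has no endpoints yet, its cluster IP is programmed into the
	// no-endpoint-services map (pointing at reject-chain) rather than into
	// service-ips, so traffic to it is rejected instead of being DNATed.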
  4234  
  4235  	populateEndpointSlices(fp,
  4236  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  4237  			eps.AddressType = discovery.AddressTypeIPv4
  4238  			eps.Endpoints = []discovery.Endpoint{{
  4239  				Addresses: []string{"10.0.4.1"},
  4240  			}}
  4241  			eps.Ports = []discovery.EndpointPort{{
  4242  				Name:     ptr.To("p80"),
  4243  				Port:     ptr.To[int32](80),
  4244  				Protocol: ptr.To(v1.ProtocolTCP),
  4245  			}}
  4246  		}),
  4247  	)
  4248  	fp.syncProxyRules()
  4249  	expected = baseRules + dedent.Dedent(`
  4250  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4251  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4252  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4253  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4254  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4255  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4256  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4257  
  4258  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4259  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4260  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4261  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4262  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4263  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4264  
  4265  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4266  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4267  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4268  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4269  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4270  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4271  
  4272  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4273  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4274  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4275  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4276  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4277  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4278  		`)
  4279  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4280  
  4281  	// Change an endpoint of an existing service.
  4282  	eps3update := eps3.DeepCopy()
  4283  	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
  4284  	fp.OnEndpointSliceUpdate(eps3, eps3update)
  4285  	fp.syncProxyRules()
  4286  
  4287  	// The old endpoint chain (for 10.0.3.1) will not be deleted yet.
  4288  	expected = baseRules + dedent.Dedent(`
  4289  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4290  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4291  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4292  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4293  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4294  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4295  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4296  
  4297  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4298  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4299  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4300  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4301  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4302  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4303  
  4304  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4305  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4306  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 }
  4307  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4308  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4309  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4310  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4311  
  4312  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4313  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4314  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4315  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4316  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4317  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4318  		`)
  4319  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4320  
  4321  	// (Ensure the old svc3 chain gets deleted in the next sync.)
  4322  	ageStaleChains()
  4323  
  4324  	// Add an endpoint to a service.
  4325  	eps3update2 := eps3update.DeepCopy()
  4326  	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
  4327  	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
  4328  	fp.syncProxyRules()
  4329  
  4330  	expected = baseRules + dedent.Dedent(`
  4331  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4332  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4333  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4334  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4335  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4336  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4337  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4338  
  4339  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4340  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4341  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4342  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4343  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4344  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4345  
  4346  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4347  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4348  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
  4349  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4350  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4351  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4352  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4353  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 jump mark-for-masquerade
  4354  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
  4355  
  4356  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4357  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4358  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4359  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4360  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4361  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4362  		`)
  4363  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4364  
  4365  	// Empty a service's endpoints; its chains will be flushed, but not immediately deleted.
  4366  	eps3update3 := eps3update2.DeepCopy()
  4367  	eps3update3.Endpoints = []discovery.Endpoint{}
  4368  	fp.OnEndpointSliceUpdate(eps3update2, eps3update3)
  4369  	fp.syncProxyRules()
  4370  	expected = baseRules + dedent.Dedent(`
  4371  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4372  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4373  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4374  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4375  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4376  		add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
  4377  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4378  
  4379  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4380  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4381  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4382  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4383  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4384  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4385  
  4386  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4387  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4388  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4389  
  4390  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4391  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4392  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4393  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4394  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4395  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4396  		`)
  4397  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4398  	expectedStaleChains := sets.NewString("service-4AT6LBPK-ns3/svc3/tcp/p80", "endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80", "endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80")
  4399  	gotStaleChains := sets.StringKeySet(fp.staleChains)
  4400  	if !expectedStaleChains.Equal(gotStaleChains) {
  4401  		t.Errorf("expected stale chains %v, got %v", expectedStaleChains, gotStaleChains)
  4402  	}
  4403  	// Restore the endpoints immediately; the service's chains will be repopulated and removed from staleChains.
  4404  	fp.OnEndpointSliceUpdate(eps3update3, eps3update2)
  4405  	fp.syncProxyRules()
  4406  	expected = baseRules + dedent.Dedent(`
  4407  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4408  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4409  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4410  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  4411  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4412  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4413  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4414  
  4415  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4416  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4417  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4418  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4419  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4420  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4421  
  4422  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4423  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4424  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
  4425  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4426  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4427  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4428  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4429  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 jump mark-for-masquerade
  4430  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
  4431  
  4432  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4433  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4434  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4435  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4436  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4437  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4438  		`)
  4439  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4440  	if len(fp.staleChains) != 0 {
  4441  		t.Errorf("unexpected stale chains: %v", fp.staleChains)
  4442  	}
  4443  
  4444  	// Empty the service's endpoints and restore them after the stale chains have aged:
  4445  	// - in the first sync, its chains are flushed but not immediately deleted.
  4446  	// - in the second sync, the aged stale chains are deleted first, then recreated.
  4447  	fp.OnEndpointSliceUpdate(eps3update2, eps3update3)
  4448  	fp.syncProxyRules()
  4449  	ageStaleChains()
  4450  	fp.OnEndpointSliceUpdate(eps3update3, eps3update2)
  4451  	fp.syncProxyRules()
  4452  	// The second change counteracts the first one, so same expected rules as last time
  4453  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4454  
  4455  	// Sync with no new changes, so same expected rules as last time
  4456  	fp.syncProxyRules()
  4457  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4458  }
  4459  
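// TestNoEndpointsMetric verifies metrics.SyncProxyRulesNoLocalEndpointsTotal:
// a service whose internal or external traffic policy is Local but which has
// no endpoints on this node is counted under the corresponding label
// ("internal" or "external"), while a service with no endpoints at all is not
// counted under either label.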
  4460  func TestNoEndpointsMetric(t *testing.T) {
  4461  	type endpoint struct {
  4462  		ip       string
  4463  		hostname string
  4464  	}
  4465  
  4466  	metrics.RegisterMetrics()
  4467  	testCases := []struct {
  4468  		name                                                string
  4469  		internalTrafficPolicy                               *v1.ServiceInternalTrafficPolicy
  4470  		externalTrafficPolicy                               v1.ServiceExternalTrafficPolicy
  4471  		endpoints                                           []endpoint
  4472  		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
  4473  		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
  4474  	}{
  4475  		{
  4476  			name:                  "internalTrafficPolicy is set and there are local endpoints",
  4477  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4478  			endpoints: []endpoint{
  4479  				{"10.0.1.1", testHostname},
  4480  				{"10.0.1.2", "host1"},
  4481  				{"10.0.1.3", "host2"},
  4482  			},
  4483  		},
  4484  		{
  4485  			name:                  "externalTrafficPolicy is set and there are local endpoints",
  4486  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4487  			endpoints: []endpoint{
  4488  				{"10.0.1.1", testHostname},
  4489  				{"10.0.1.2", "host1"},
  4490  				{"10.0.1.3", "host2"},
  4491  			},
  4492  		},
  4493  		{
  4494  			name:                  "both policies are set and there are local endpoints",
  4495  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4496  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4497  			endpoints: []endpoint{
  4498  				{"10.0.1.1", testHostname},
  4499  				{"10.0.1.2", "host1"},
  4500  				{"10.0.1.3", "host2"},
  4501  			},
  4502  		},
  4503  		{
  4504  			name:                  "internalTrafficPolicy is set and there are no local endpoints",
  4505  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4506  			endpoints: []endpoint{
  4507  				{"10.0.1.1", "host0"},
  4508  				{"10.0.1.2", "host1"},
  4509  				{"10.0.1.3", "host2"},
  4510  			},
  4511  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  4512  		},
  4513  		{
  4514  			name:                  "externalTrafficPolicy is set and there are no local endpoints",
  4515  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4516  			endpoints: []endpoint{
  4517  				{"10.0.1.1", "host0"},
  4518  				{"10.0.1.2", "host1"},
  4519  				{"10.0.1.3", "host2"},
  4520  			},
  4521  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  4522  		},
  4523  		{
  4524  			name:                  "both policies are set and there are no local endpoints",
  4525  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4526  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4527  			endpoints: []endpoint{
  4528  				{"10.0.1.1", "host0"},
  4529  				{"10.0.1.2", "host1"},
  4530  				{"10.0.1.3", "host2"},
  4531  			},
  4532  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  4533  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  4534  		},
  4535  		{
  4536  			name:                  "both policies are set and there are no endpoints at all",
  4537  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4538  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4539  			endpoints:             []endpoint{},
  4540  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
  4541  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
  4542  		},
  4543  	}
  4544  
  4545  	for _, tc := range testCases {
  4546  		t.Run(tc.name, func(t *testing.T) {
  4547  			_, fp := NewFakeProxier(v1.IPv4Protocol)
  4548  			fp.OnServiceSynced()
  4549  			fp.OnEndpointSlicesSynced()
  4550  
  4551  			serviceName := "svc1"
  4552  			namespaceName := "ns1"
  4553  
  4554  			svc := &v1.Service{
  4555  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4556  				Spec: v1.ServiceSpec{
  4557  					ClusterIP: "172.30.1.1",
  4558  					Selector:  map[string]string{"foo": "bar"},
  4559  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
  4560  				},
  4561  			}
  4562  			if tc.internalTrafficPolicy != nil {
  4563  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  4564  			}
  4565  			if tc.externalTrafficPolicy != "" {
  4566  				svc.Spec.Type = v1.ServiceTypeNodePort
  4567  				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
  4568  			}
  4569  
  4570  			fp.OnServiceAdd(svc)
  4571  
  4572  			endpointSlice := &discovery.EndpointSlice{
  4573  				ObjectMeta: metav1.ObjectMeta{
  4574  					Name:      fmt.Sprintf("%s-1", serviceName),
  4575  					Namespace: namespaceName,
  4576  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4577  				},
  4578  				Ports: []discovery.EndpointPort{{
  4579  					Name:     ptr.To(""),
  4580  					Port:     ptr.To[int32](80),
  4581  					Protocol: ptr.To(v1.ProtocolTCP),
  4582  				}},
  4583  				AddressType: discovery.AddressTypeIPv4,
  4584  			}
  4585  			for _, ep := range tc.endpoints {
  4586  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  4587  					Addresses:  []string{ep.ip},
  4588  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4589  					NodeName:   ptr.To(ep.hostname),
  4590  				})
  4591  			}
  4592  
  4593  			fp.OnEndpointSliceAdd(endpointSlice)
  4594  			fp.syncProxyRules()
  4595  			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
  4596  			if err != nil {
  4597  				t.Errorf("failed to get %s value(internal), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  4598  			}
  4599  
  4600  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
  4601  				t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(internal): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
  4602  			}
  4603  
  4604  			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
  4605  			if err != nil {
  4606  				t.Errorf("failed to get %s value(external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  4607  			}
  4608  
  4609  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
  4610  				t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(external): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
  4611  			}
  4612  		})
  4613  	}
  4614  }
  4615  
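// TestLoadBalancerIngressRouteTypeProxy verifies handling of the load
// balancer ingress ipMode field: with the LoadBalancerIPMode feature gate
// enabled and ipMode set to Proxy, no service-ips entry is programmed for the
// load balancer IP; with the gate disabled, or with ipMode VIP or unset, the
// entry is programmed as usual.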
  4616  func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
  4617  	testCases := []struct {
  4618  		name          string
  4619  		ipModeEnabled bool
  4620  		svcIP         string
  4621  		svcLBIP       string
  4622  		ipMode        *v1.LoadBalancerIPMode
  4623  		expectedRule  bool
  4624  	}{
  4625  		/* LoadBalancerIPMode disabled */
  4626  		{
  4627  			name:          "LoadBalancerIPMode disabled, ipMode Proxy",
  4628  			ipModeEnabled: false,
  4629  			svcIP:         "10.20.30.41",
  4630  			svcLBIP:       "1.2.3.4",
  4631  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  4632  			expectedRule:  true,
  4633  		},
  4634  		{
  4635  			name:          "LoadBalancerIPMode disabled, ipMode VIP",
  4636  			ipModeEnabled: false,
  4637  			svcIP:         "10.20.30.42",
  4638  			svcLBIP:       "1.2.3.5",
  4639  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  4640  			expectedRule:  true,
  4641  		},
  4642  		{
  4643  			name:          "LoadBalancerIPMode disabled, ipMode nil",
  4644  			ipModeEnabled: false,
  4645  			svcIP:         "10.20.30.43",
  4646  			svcLBIP:       "1.2.3.6",
  4647  			ipMode:        nil,
  4648  			expectedRule:  true,
  4649  		},
  4650  		/* LoadBalancerIPMode enabled */
  4651  		{
  4652  			name:          "LoadBalancerIPMode enabled, ipMode Proxy",
  4653  			ipModeEnabled: true,
  4654  			svcIP:         "10.20.30.41",
  4655  			svcLBIP:       "1.2.3.4",
  4656  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  4657  			expectedRule:  false,
  4658  		},
  4659  		{
  4660  			name:          "LoadBalancerIPMode enabled, ipMode VIP",
  4661  			ipModeEnabled: true,
  4662  			svcIP:         "10.20.30.42",
  4663  			svcLBIP:       "1.2.3.5",
  4664  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  4665  			expectedRule:  true,
  4666  		},
  4667  		{
  4668  			name:          "LoadBalancerIPMode enabled, ipMode nil",
  4669  			ipModeEnabled: true,
  4670  			svcIP:         "10.20.30.43",
  4671  			svcLBIP:       "1.2.3.6",
  4672  			ipMode:        nil,
  4673  			expectedRule:  true,
  4674  		},
  4675  	}
  4676  
  4677  	svcPort := 80
  4678  	svcNodePort := 3001
  4679  	svcPortName := proxy.ServicePortName{
  4680  		NamespacedName: makeNSN("ns1", "svc1"),
  4681  		Port:           "p80",
  4682  		Protocol:       v1.ProtocolTCP,
  4683  	}
  4684  
  4685  	for _, testCase := range testCases {
  4686  		t.Run(testCase.name, func(t *testing.T) {
  4687  			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)()
  4688  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  4689  			makeServiceMap(fp,
  4690  				makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4691  					svc.Spec.Type = "LoadBalancer"
  4692  					svc.Spec.ClusterIP = testCase.svcIP
  4693  					svc.Spec.Ports = []v1.ServicePort{{
  4694  						Name:     svcPortName.Port,
  4695  						Port:     int32(svcPort),
  4696  						Protocol: v1.ProtocolTCP,
  4697  						NodePort: int32(svcNodePort),
  4698  					}}
  4699  					svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  4700  						IP:     testCase.svcLBIP,
  4701  						IPMode: testCase.ipMode,
  4702  					}}
  4703  				}),
  4704  			)
  4705  
  4706  			populateEndpointSlices(fp,
  4707  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  4708  					eps.AddressType = discovery.AddressTypeIPv4
  4709  					eps.Endpoints = []discovery.Endpoint{{
  4710  						Addresses: []string{"10.180.0.1"},
  4711  					}}
  4712  					eps.Ports = []discovery.EndpointPort{{
  4713  						Name:     ptr.To("p80"),
  4714  						Port:     ptr.To[int32](80),
  4715  						Protocol: ptr.To(v1.ProtocolTCP),
  4716  					}}
  4717  				}),
  4718  			)
  4719  
  4720  			fp.syncProxyRules()
  4721  
  4722  			element := nft.Table.Maps["service-ips"].FindElement(testCase.svcLBIP, "tcp", "80")
  4723  			ruleExists := element != nil
  4724  			if ruleExists != testCase.expectedRule {
  4725  				t.Errorf("service-ips element for %s: expected present=%v, got present=%v", testCase.svcLBIP, testCase.expectedRule, ruleExists)
  4726  			}
  4727  		})
  4728  	}
  4729  }
  4730  
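// Test_servicePortChainNameBase exercises the chain-name scheme used above:
// an 8-character hash prefix followed by a readable
// "namespace/name/protocol/port" suffix. A plausible sketch of how such a
// prefix could be derived, using crypto/sha256 and encoding/base32 (the
// hashed input and the helper name are assumptions for illustration, not the
// proxier's actual implementation):
//
//	func exampleNamePrefix(name string) string {
//		sum := sha256.Sum256([]byte(name))
//		return base32.StdEncoding.EncodeToString(sum[:])[:8]
//	}
//
// For the first case below to pass, exampleNamePrefix("testing/service/tcp/http")
// would have to return "P4ZYZVCF", assuming that string is what actually gets
// hashed.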
  4731  func Test_servicePortChainNameBase(t *testing.T) {
  4732  	testCases := []struct {
  4733  		name     string
  4734  		spn      proxy.ServicePortName
  4735  		protocol string
  4736  		expected string
  4737  	}{
  4738  		{
  4739  			name: "simple",
  4740  			spn: proxy.ServicePortName{
  4741  				NamespacedName: types.NamespacedName{
  4742  					Namespace: "testing",
  4743  					Name:      "service",
  4744  				},
  4745  				Port: "http",
  4746  			},
  4747  			protocol: "tcp",
  4748  			expected: "P4ZYZVCF-testing/service/tcp/http",
  4749  		},
  4750  		{
  4751  			name: "different port, different hash",
  4752  			spn: proxy.ServicePortName{
  4753  				NamespacedName: types.NamespacedName{
  4754  					Namespace: "testing",
  4755  					Name:      "service",
  4756  				},
  4757  				Port: "https",
  4758  			},
  4759  			protocol: "tcp",
  4760  			expected: "LZBRENCP-testing/service/tcp/https",
  4761  		},
  4762  		{
  4763  			name: "max length",
  4764  			spn: proxy.ServicePortName{
  4765  				NamespacedName: types.NamespacedName{
  4766  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4767  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4768  				},
  4769  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4770  			},
  4771  			protocol: "sctp",
  4772  			expected: "KR6NACJP-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4773  		},
  4774  	}
  4775  
  4776  	for _, tc := range testCases {
  4777  		t.Run(tc.name, func(t *testing.T) {
  4778  			name := servicePortChainNameBase(&tc.spn, tc.protocol)
  4779  			if name != tc.expected {
  4780  				t.Errorf("expected %q, got %q", tc.expected, name)
  4781  			}
  4782  		})
  4783  	}
  4784  }
  4785  
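// Test_servicePortEndpointChainNameBase covers the per-endpoint variant: the
// endpoint is appended as "__ip/port", with IPv6 brackets dropped and colons
// replaced by dots, and names that would be too long for a chain name are
// truncated with a trailing "...". The hash prefix still distinguishes
// endpoints whose truncated suffixes are identical (see the last two cases).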
  4786  func Test_servicePortEndpointChainNameBase(t *testing.T) {
  4787  	testCases := []struct {
  4788  		name     string
  4789  		spn      proxy.ServicePortName
  4790  		protocol string
  4791  		endpoint string
  4792  		expected string
  4793  	}{
  4794  		{
  4795  			name: "simple",
  4796  			spn: proxy.ServicePortName{
  4797  				NamespacedName: types.NamespacedName{
  4798  					Namespace: "testing",
  4799  					Name:      "service",
  4800  				},
  4801  				Port: "http",
  4802  			},
  4803  			protocol: "tcp",
  4804  			endpoint: "10.180.0.1:80",
  4805  			expected: "JO2XBXZR-testing/service/tcp/http__10.180.0.1/80",
  4806  		},
  4807  		{
  4808  			name: "different endpoint, different hash",
  4809  			spn: proxy.ServicePortName{
  4810  				NamespacedName: types.NamespacedName{
  4811  					Namespace: "testing",
  4812  					Name:      "service",
  4813  				},
  4814  				Port: "http",
  4815  			},
  4816  			protocol: "tcp",
  4817  			endpoint: "10.180.0.2:80",
  4818  			expected: "5S6H3H22-testing/service/tcp/http__10.180.0.2/80",
  4819  		},
  4820  		{
  4821  			name: "ipv6",
  4822  			spn: proxy.ServicePortName{
  4823  				NamespacedName: types.NamespacedName{
  4824  					Namespace: "testing",
  4825  					Name:      "service",
  4826  				},
  4827  				Port: "http",
  4828  			},
  4829  			protocol: "tcp",
  4830  			endpoint: "[fd80:abcd:12::a1b2:c3d4:e5f6:9999]:80",
  4831  			expected: "U7E2ET36-testing/service/tcp/http__fd80.abcd.12..a1b2.c3d4.e5f6.9999/80",
  4832  		},
  4833  		{
  4834  			name: "max length without truncation",
  4835  			spn: proxy.ServicePortName{
  4836  				NamespacedName: types.NamespacedName{
  4837  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4838  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4839  				},
  4840  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4841  			},
  4842  			protocol: "sctp",
  4843  			endpoint: "[1234:5678:9abc:def0::abc:1234]:443",
  4844  			expected: "5YS7AFEA-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abc.1234/443",
  4845  		},
  4846  		{
  4847  			name: "truncated, 1",
  4848  			spn: proxy.ServicePortName{
  4849  				NamespacedName: types.NamespacedName{
  4850  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4851  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4852  				},
  4853  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4854  			},
  4855  			protocol: "sctp",
  4856  			endpoint: "[1234:5678:9abc:def0::abcd:1234:5678]:443",
  4857  			expected: "CI6C53Q3-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abcd.1234...",
  4858  		},
  4859  		{
  4860  			name: "truncated, 2 (different IP, which is not visible in the result)",
  4861  			spn: proxy.ServicePortName{
  4862  				NamespacedName: types.NamespacedName{
  4863  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4864  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4865  				},
  4866  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4867  			},
  4868  			protocol: "sctp",
  4869  			endpoint: "[1234:5678:9abc:def0::abcd:1234:8765]:443",
  4870  			expected: "2FLXFK6X-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abcd.1234...",
  4871  		},
  4872  	}
  4873  
  4874  	for _, tc := range testCases {
  4875  		t.Run(tc.name, func(t *testing.T) {
  4876  			name := servicePortEndpointChainNameBase(&tc.spn, tc.protocol, tc.endpoint)
  4877  			if name != tc.expected {
  4878  				t.Errorf("expected %q, got %q", tc.expected, name)
  4879  			}
  4880  		})
  4881  	}
  4882  }
  4883  
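// TestProxier_OnServiceCIDRsChanged expects the proxier to keep only the
// CIDRs matching its own IP family and to join them with commas. A minimal
// sketch of that filtering, assuming serviceCIDRs is a plain string field and
// that netutils.IsIPv6CIDRString is used for the family check (the body is
// illustrative, not the file's implementation):
//
//	func (proxier *Proxier) OnServiceCIDRsChanged(cidrs []string) {
//		matching := make([]string, 0, len(cidrs))
//		for _, cidr := range cidrs {
//			if netutils.IsIPv6CIDRString(cidr) == (proxier.ipFamily == v1.IPv6Protocol) {
//				matching = append(matching, cidr)
//			}
//		}
//		proxier.serviceCIDRs = strings.Join(matching, ",") // would need a "strings" import
//	}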
  4884  func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
  4885  	var proxier *Proxier
  4886  
  4887  	proxier = &Proxier{ipFamily: v1.IPv4Protocol}
  4888  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
  4889  	assert.Equal(t, "172.30.0.0/16", proxier.serviceCIDRs)
  4890  
  4891  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
  4892  	assert.Equal(t, "172.30.0.0/16,172.50.0.0/16", proxier.serviceCIDRs)
  4893  
  4894  	proxier = &Proxier{ipFamily: v1.IPv6Protocol}
  4895  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
  4896  	assert.Equal(t, "fd00:10:96::/112", proxier.serviceCIDRs)
  4897  
  4898  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
  4899  	assert.Equal(t, "fd00:10:96::/112,fd00:172:30::/112", proxier.serviceCIDRs)
  4900  }
  4901  
