...

Source file src/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go

Documentation: k8s.io/kubernetes/pkg/proxy/iptables

     1  //go:build linux
     2  // +build linux
     3  
     4  /*
     5  Copyright 2015 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package iptables
    21  
    22  import (
    23  	"bytes"
    24  	"fmt"
    25  	"net"
    26  	"reflect"
    27  	"regexp"
    28  	stdruntime "runtime"
    29  	"sort"
    30  	"strconv"
    31  	"strings"
    32  	"testing"
    33  	"time"
    34  
    35  	"github.com/google/go-cmp/cmp"
    36  	"github.com/lithammer/dedent"
    37  	"github.com/stretchr/testify/assert"
    38  	v1 "k8s.io/api/core/v1"
    39  	discovery "k8s.io/api/discovery/v1"
    40  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    41  	"k8s.io/apimachinery/pkg/types"
    42  	"k8s.io/apimachinery/pkg/util/intstr"
    43  	"k8s.io/apimachinery/pkg/util/sets"
    44  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    45  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    46  	"k8s.io/component-base/metrics/legacyregistry"
    47  	"k8s.io/component-base/metrics/testutil"
    48  	"k8s.io/klog/v2"
    49  	"k8s.io/kubernetes/pkg/features"
    50  	"k8s.io/kubernetes/pkg/proxy"
    51  	"k8s.io/kubernetes/pkg/proxy/conntrack"
    52  	"k8s.io/kubernetes/pkg/proxy/metrics"
    53  
    54  	"k8s.io/kubernetes/pkg/proxy/healthcheck"
    55  	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
    56  	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
    57  	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
    58  	"k8s.io/kubernetes/pkg/util/async"
    59  	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
    60  	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
    61  	netutils "k8s.io/utils/net"
    62  	"k8s.io/utils/ptr"
    63  )
    64  
    65  // Conventions for tests using NewFakeProxier:
    66  //
    67  // Pod IPs:             10.0.0.0/8
    68  // Service ClusterIPs:  172.30.0.0/16
    69  // Node IPs:            192.168.0.0/24
    70  // Local Node IP:       192.168.0.2
    71  // Service ExternalIPs: 192.168.99.0/24
    72  // LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
    73  // Non-cluster IPs:     203.0.113.0/24
    74  // LB Source Range:     203.0.113.0/25
    75  
    76  const testHostname = "test-hostname"
    77  const testNodeIP = "192.168.0.2"
    78  const testNodeIPAlt = "192.168.1.2"
    79  const testExternalIP = "192.168.99.11"
    80  const testNodeIPv6 = "2001:db8::1"
    81  const testNodeIPv6Alt = "2001:db8:1::2"
    82  const testExternalClient = "203.0.113.2"
    83  const testExternalClientBlocked = "203.0.113.130"
    84  
    85  var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt}
    86  
    87  func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
    88  	// TODO: Call NewProxier after refactoring out the goroutine
    89  	// invocation into a Run() method.
    90  	ipfamily := v1.IPv4Protocol
    91  	podCIDR := "10.0.0.0/8"
    92  	if ipt.IsIPv6() {
    93  		ipfamily = v1.IPv6Protocol
    94  		podCIDR = "fd00:10::/64"
    95  	}
    96  	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR)
    97  
    98  	networkInterfacer := proxyutiltest.NewFakeNetwork()
    99  	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
   100  	addrs := []net.Addr{
   101  		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
   102  		&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
   103  	}
   104  	networkInterfacer.AddInterfaceAddr(&itf, addrs)
   105  	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
   106  	addrs1 := []net.Addr{
   107  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
   108  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)},
   109  		&net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)},
   110  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)},
   111  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)},
   112  	}
   113  	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)
   114  
   115  	p := &Proxier{
   116  		svcPortMap:               make(proxy.ServicePortMap),
   117  		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
   118  		endpointsMap:             make(proxy.EndpointsMap),
   119  		endpointsChanges:         proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
   120  		needFullSync:             true,
   121  		iptables:                 ipt,
   122  		masqueradeMark:           "0x4000",
   123  		conntrack:                conntrack.NewFake(),
   124  		localDetector:            detectLocal,
   125  		hostname:                 testHostname,
   126  		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
   127  		precomputedProbabilities: make([]string, 0, 1001),
   128  		iptablesData:             bytes.NewBuffer(nil),
   129  		existingFilterChainsData: bytes.NewBuffer(nil),
   130  		filterChains:             proxyutil.NewLineBuffer(),
   131  		filterRules:              proxyutil.NewLineBuffer(),
   132  		natChains:                proxyutil.NewLineBuffer(),
   133  		natRules:                 proxyutil.NewLineBuffer(),
   134  		nodeIP:                   netutils.ParseIPSloppy(testNodeIP),
   135  		localhostNodePorts:       true,
   136  		nodePortAddresses:        proxyutil.NewNodePortAddresses(ipfamily, nil, nil),
   137  		networkInterfacer:        networkInterfacer,
   138  	}
   139  	p.setInitialized(true)
   140  	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
   141  	return p
   142  }
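
// An editorial sketch (not part of the upstream test suite) of the pattern
// most tests in this file follow: build a fake iptables implementation, wrap
// it in a fake Proxier, sync, and then read the generated ruleset back out of
// fp.iptablesData. Individual tests feed in services and endpoints before
// syncing and make assertions about the resulting rules.
func exampleFakeProxierSync() string {
	ipt := iptablestest.NewFake() // IPv4 fake; iptablestest.NewIPv6Fake() exists for IPv6
	fp := NewFakeProxier(ipt)
	fp.syncProxyRules() // writes the full iptables-restore input to fp.iptablesData
	return fp.iptablesData.String()
}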
   143  
   144  // parseIPTablesData takes iptables-save output and returns a map of table name to array of lines.
   145  func parseIPTablesData(ruleData string) (map[string][]string, error) {
   146  	// Split ruleData at the "COMMIT" lines; given valid input, this will result in
   147  	// one element for each table plus an extra empty element (since the ruleData
   148  	// should end with a "COMMIT" line).
   149  	rawTables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
   150  	nTables := len(rawTables) - 1
   151  	if nTables < 2 || rawTables[nTables] != "" {
   152  		return nil, fmt.Errorf("bad ruleData (%d tables)\n%s", nTables, ruleData)
   153  	}
   154  
   155  	tables := make(map[string][]string, nTables)
   156  	for i, table := range rawTables[:nTables] {
   157  		lines := strings.Split(strings.Trim(table, "\n"), "\n")
   158  		// The first line should be, eg, "*nat" or "*filter"
   159  		if lines[0][0] != '*' {
   160  			return nil, fmt.Errorf("bad ruleData (table %d starts with %q)", i+1, lines[0])
   161  		}
   162  		// add back the "COMMIT" line that got eaten by the strings.Split above
   163  		lines = append(lines, "COMMIT")
   164  		tables[lines[0][1:]] = lines
   165  	}
   166  
   167  	if tables["nat"] == nil {
   168  		return nil, fmt.Errorf("bad ruleData (no %q table)", "nat")
   169  	}
   170  	if tables["filter"] == nil {
   171  		return nil, fmt.Errorf("bad ruleData (no %q table)", "filter")
   172  	}
   173  	return tables, nil
   174  }
   175  
   176  func TestParseIPTablesData(t *testing.T) {
   177  	for _, tc := range []struct {
   178  		name   string
   179  		input  string
   180  		output map[string][]string
   181  		error  string
   182  	}{
   183  		{
   184  			name: "basic test",
   185  			input: dedent.Dedent(`
   186  				*filter
   187  				:KUBE-SERVICES - [0:0]
   188  				:KUBE-EXTERNAL-SERVICES - [0:0]
   189  				:KUBE-FORWARD - [0:0]
   190  				:KUBE-NODEPORTS - [0:0]
   191  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   192  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   193  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   194  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   195  				COMMIT
   196  				*nat
   197  				:KUBE-SERVICES - [0:0]
   198  				:KUBE-NODEPORTS - [0:0]
   199  				:KUBE-POSTROUTING - [0:0]
   200  				:KUBE-MARK-MASQ - [0:0]
   201  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   202  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   203  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   204  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   205  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   206  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   207  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   208  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
   209  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   210  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   211  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   212  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   213  				COMMIT
   214  				`),
   215  			output: map[string][]string{
   216  				"filter": {
   217  					`*filter`,
   218  					`:KUBE-SERVICES - [0:0]`,
   219  					`:KUBE-EXTERNAL-SERVICES - [0:0]`,
   220  					`:KUBE-FORWARD - [0:0]`,
   221  					`:KUBE-NODEPORTS - [0:0]`,
   222  					`-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
   223  					`-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`,
   224  					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT`,
   225  					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT`,
   226  					`COMMIT`,
   227  				},
   228  				"nat": {
   229  					`*nat`,
   230  					`:KUBE-SERVICES - [0:0]`,
   231  					`:KUBE-NODEPORTS - [0:0]`,
   232  					`:KUBE-POSTROUTING - [0:0]`,
   233  					`:KUBE-MARK-MASQ - [0:0]`,
   234  					`:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]`,
   235  					`:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]`,
   236  					`-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN`,
   237  					`-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
   238  					`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`,
   239  					`-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000`,
   240  					`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
   241  					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`,
   242  					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
   243  					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
   244  					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
   245  					`-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS`,
   246  					`COMMIT`,
   247  				},
   248  			},
   249  		},
   250  		{
   251  			name: "not enough tables",
   252  			input: dedent.Dedent(`
   253  				*filter
   254  				:KUBE-SERVICES - [0:0]
   255  				:KUBE-EXTERNAL-SERVICES - [0:0]
   256  				:KUBE-FORWARD - [0:0]
   257  				:KUBE-NODEPORTS - [0:0]
   258  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   259  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   260  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   261  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   262  				COMMIT
   263  				`),
   264  			error: "bad ruleData (1 tables)",
   265  		},
   266  		{
   267  			name: "trailing junk",
   268  			input: dedent.Dedent(`
   269  				*filter
   270  				:KUBE-SERVICES - [0:0]
   271  				:KUBE-EXTERNAL-SERVICES - [0:0]
   272  				:KUBE-FORWARD - [0:0]
   273  				:KUBE-NODEPORTS - [0:0]
   274  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   275  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   276  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   277  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   278  				COMMIT
   279  				*nat
   280  				:KUBE-SERVICES - [0:0]
   281  				:KUBE-EXTERNAL-SERVICES - [0:0]
   282  				:KUBE-FORWARD - [0:0]
   283  				:KUBE-NODEPORTS - [0:0]
   284  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   285  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   286  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   287  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   288  				COMMIT
   289  				junk
   290  				`),
   291  			error: "bad ruleData (2 tables)",
   292  		},
   293  		{
   294  			name: "bad start line",
   295  			input: dedent.Dedent(`
   296  				*filter
   297  				:KUBE-SERVICES - [0:0]
   298  				:KUBE-EXTERNAL-SERVICES - [0:0]
   299  				:KUBE-FORWARD - [0:0]
   300  				:KUBE-NODEPORTS - [0:0]
   301  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   302  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   303  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   304  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   305  				COMMIT
   306  				:KUBE-SERVICES - [0:0]
   307  				:KUBE-EXTERNAL-SERVICES - [0:0]
   308  				:KUBE-FORWARD - [0:0]
   309  				:KUBE-NODEPORTS - [0:0]
   310  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   311  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   312  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   313  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   314  				COMMIT
   315  				`),
   316  			error: `bad ruleData (table 2 starts with ":KUBE-SERVICES - [0:0]")`,
   317  		},
   318  		{
   319  			name: "no nat",
   320  			input: dedent.Dedent(`
   321  				*filter
   322  				:KUBE-SERVICES - [0:0]
   323  				:KUBE-EXTERNAL-SERVICES - [0:0]
   324  				:KUBE-FORWARD - [0:0]
   325  				:KUBE-NODEPORTS - [0:0]
   326  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   327  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   328  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   329  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   330  				COMMIT
   331  				*mangle
   332  				:KUBE-SERVICES - [0:0]
   333  				:KUBE-EXTERNAL-SERVICES - [0:0]
   334  				:KUBE-FORWARD - [0:0]
   335  				:KUBE-NODEPORTS - [0:0]
   336  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   337  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   338  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   339  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   340  				COMMIT
   341  				`),
   342  			error: `bad ruleData (no "nat" table)`,
   343  		},
   344  		{
   345  			name: "no filter",
   346  			input: dedent.Dedent(`
   347  				*mangle
   348  				:KUBE-SERVICES - [0:0]
   349  				:KUBE-EXTERNAL-SERVICES - [0:0]
   350  				:KUBE-FORWARD - [0:0]
   351  				:KUBE-NODEPORTS - [0:0]
   352  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   353  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   354  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   355  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   356  				COMMIT
   357  				*nat
   358  				:KUBE-SERVICES - [0:0]
   359  				:KUBE-EXTERNAL-SERVICES - [0:0]
   360  				:KUBE-FORWARD - [0:0]
   361  				:KUBE-NODEPORTS - [0:0]
   362  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   363  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   364  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   365  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   366  				COMMIT
   367  				`),
   368  			error: `bad ruleData (no "filter" table)`,
   369  		},
   370  	} {
   371  		t.Run(tc.name, func(t *testing.T) {
   372  			out, err := parseIPTablesData(tc.input)
   373  			if err == nil {
   374  				if tc.error != "" {
   375  					t.Errorf("unexpectedly did not get error")
   376  				} else {
   377  					assert.Equal(t, tc.output, out)
   378  				}
   379  			} else {
   380  				if tc.error == "" {
   381  					t.Errorf("got unexpected error: %v", err)
   382  				} else if !strings.HasPrefix(err.Error(), tc.error) {
   383  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
   384  				}
   385  			}
   386  		})
   387  	}
   388  }
   389  
   390  func countRules(tableName utiliptables.Table, ruleData string) int {
   391  	dump, err := iptablestest.ParseIPTablesDump(ruleData)
   392  	if err != nil {
   393  		klog.ErrorS(err, "error parsing iptables rules")
   394  		return -1
   395  	}
   396  
   397  	rules := 0
   398  	table, err := dump.GetTable(tableName)
   399  	if err != nil {
   400  		klog.ErrorS(err, "can't find table", "table", tableName)
   401  		return -1
   402  	}
   403  
   404  	for _, c := range table.Chains {
   405  		rules += len(c.Rules)
   406  	}
   407  	return rules
   408  }
   409  
   410  func countRulesFromMetric(tableName utiliptables.Table) int {
   411  	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(tableName)))
   412  	if err != nil {
   413  		klog.ErrorS(err, "metrics are not registered?")
   414  		return -1
   415  	}
   416  	return int(numRulesFloat)
   417  }
   418  
   419  func countRulesFromLastSyncMetric(tableName utiliptables.Table) int {
   420  	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesLastSync.WithLabelValues(string(tableName)))
   421  	if err != nil {
   422  		klog.ErrorS(err, "metrics are not registered?")
   423  		return -1
   424  	}
   425  	return int(numRulesFloat)
   426  }
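
// An editorial sketch combining the helpers above: parse the nat-table rule
// count out of an iptables-save dump and cross-check it against the value
// published to the metrics registry (this assumes the proxy metrics have
// already been registered, which the real tests arrange).
func exampleRuleCountsAgree(fp *Proxier) bool {
	fromDump := countRules(utiliptables.TableNAT, fp.iptablesData.String())
	fromMetric := countRulesFromMetric(utiliptables.TableNAT)
	return fromDump != -1 && fromDump == fromMetric
}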
   427  
   428  // findAllMatches takes an array of lines and a pattern with one parenthesized group, and
   429  // returns a sorted array of all of the unique matches of the parenthesized group.
   430  func findAllMatches(lines []string, pattern string) []string {
   431  	regex := regexp.MustCompile(pattern)
   432  	allMatches := sets.New[string]()
   433  	for _, line := range lines {
   434  		match := regex.FindStringSubmatch(line)
   435  		if len(match) == 2 {
   436  			allMatches.Insert(match[1])
   437  		}
   438  	}
   439  	return sets.List(allMatches)
   440  }
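
// For example (editorial note): findAllMatches(lines, `-j ([^ ]*)`) returns
// the sorted, de-duplicated set of jump targets appearing anywhere in lines.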
   441  
   442  // checkIPTablesRuleJumps checks that every `-j` in the given rules jumps to a chain
   443  // that we created and added rules to.
   444  func checkIPTablesRuleJumps(ruleData string) error {
   445  	tables, err := parseIPTablesData(ruleData)
   446  	if err != nil {
   447  		return err
   448  	}
   449  
   450  	for tableName, lines := range tables {
   451  		// Find all of the lines like ":KUBE-SERVICES", indicating chains that
   452  		// iptables-restore would create when loading the data.
   453  		createdChains := sets.New[string](findAllMatches(lines, `^:([^ ]*)`)...)
   454  		// Find all of the lines like "-X KUBE-SERVICES ..." indicating chains
   455  		// that we are deleting because they are no longer used, and remove
   456  		// those chains from createdChains.
   457  		createdChains = createdChains.Delete(findAllMatches(lines, `-X ([^ ]*)`)...)
   458  
   459  		// Find all of the lines like "-A KUBE-SERVICES ..." indicating chains
   460  		// that we are adding at least one rule to.
   461  		filledChains := sets.New[string](findAllMatches(lines, `-A ([^ ]*)`)...)
   462  
   463  		// Find all of the chains that are jumped to by some rule so we can make
   464  		// sure we only jump to valid chains.
   465  		jumpedChains := sets.New[string](findAllMatches(lines, `-j ([^ ]*)`)...)
   466  		// Ignore jumps to chains that we expect to exist even if kube-proxy
   467  		// didn't create them itself.
   468  		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
   469  
   470  		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
   471  		// that we are jumping to a chain that was not created.
   472  		missingChains := jumpedChains.Difference(createdChains)
   473  		missingChains = missingChains.Union(filledChains.Difference(createdChains))
   474  		if len(missingChains) > 0 {
   475  			return fmt.Errorf("some chains in %s are used but were not created: %v", tableName, missingChains.UnsortedList())
   476  		}
   477  
   478  		// Find cases where we have "-A FOO ... -j BAR", but no "-A BAR ...",
   479  		// meaning that we are jumping to a chain that we didn't write out any
   480  		// rules for, which is normally a bug. (Except that KUBE-SERVICES always
   481  		// jumps to KUBE-NODEPORTS, even when there are no NodePort rules.)
   482  		emptyChains := jumpedChains.Difference(filledChains)
   483  		emptyChains.Delete(string(kubeNodePortsChain))
   484  		if len(emptyChains) > 0 {
   485  			return fmt.Errorf("some chains in %s are jumped to but have no rules: %v", tableName, emptyChains.UnsortedList())
   486  		}
   487  
   488  		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
   489  		// that we are creating an empty chain but not using it for anything.
   490  		extraChains := createdChains.Difference(jumpedChains)
   491  		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(kubeMarkMasqChain), string(kubeProxyFirewallChain), string(kubeletFirewallChain))
   492  		if len(extraChains) > 0 {
   493  			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.UnsortedList())
   494  		}
   495  	}
   496  
   497  	return nil
   498  }
   499  
   500  func TestCheckIPTablesRuleJumps(t *testing.T) {
   501  	for _, tc := range []struct {
   502  		name  string
   503  		input string
   504  		error string
   505  	}{
   506  		{
   507  			name: "valid",
   508  			input: dedent.Dedent(`
   509  				*filter
   510  				COMMIT
   511  				*nat
   512  				:KUBE-MARK-MASQ - [0:0]
   513  				:KUBE-SERVICES - [0:0]
   514  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   515  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   516  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   517  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
   518  				COMMIT
   519  				`),
   520  			error: "",
   521  		},
   522  		{
   523  			name: "can't jump to chain that wasn't created",
   524  			input: dedent.Dedent(`
   525  				*filter
   526  				COMMIT
   527  				*nat
   528  				:KUBE-SERVICES - [0:0]
   529  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   530  				COMMIT
   531  				`),
   532  			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   533  		},
   534  		{
   535  			name: "can't jump to chain that has no rules",
   536  			input: dedent.Dedent(`
   537  				*filter
   538  				COMMIT
   539  				*nat
   540  				:KUBE-SERVICES - [0:0]
   541  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   542  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   543  				COMMIT
   544  				`),
   545  			error: "some chains in nat are jumped to but have no rules: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   546  		},
   547  		{
   548  			name: "can't add rules to a chain that wasn't created",
   549  			input: dedent.Dedent(`
   550  				*filter
   551  				COMMIT
   552  				*nat
   553  				:KUBE-MARK-MASQ - [0:0]
   554  				:KUBE-SERVICES - [0:0]
   555  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
   556  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   557  				COMMIT
   558  				`),
   559  			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   560  		},
   600  		{
   601  			name: "can't create chain and then not use it",
   602  			input: dedent.Dedent(`
   603  				*filter
   604  				COMMIT
   605  				*nat
   606  				:KUBE-MARK-MASQ - [0:0]
   607  				:KUBE-SERVICES - [0:0]
   608  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   609  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
   610  				COMMIT
   611  				`),
   612  			error: "some chains in nat are created but not used: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   613  		},
   614  	} {
   615  		t.Run(tc.name, func(t *testing.T) {
   616  			err := checkIPTablesRuleJumps(tc.input)
   617  			if err == nil {
   618  				if tc.error != "" {
   619  					t.Errorf("unexpectedly did not get error")
   620  				}
   621  			} else {
   622  				if tc.error == "" {
   623  					t.Errorf("got unexpected error: %v", err)
   624  				} else if !strings.HasPrefix(err.Error(), tc.error) {
   625  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
   626  				}
   627  			}
   628  		})
   629  	}
   630  }
   631  
   632  // orderByCommentServiceName is a helper function that orders two IPTables rules
   633  // based on the service name in their comment. (If either rule has no comment then the
   634  // return value is undefined.)
   635  func orderByCommentServiceName(rule1, rule2 *iptablestest.Rule) bool {
   636  	if rule1.Comment == nil || rule2.Comment == nil {
   637  		return false
   638  	}
   639  	name1, name2 := rule1.Comment.Value, rule2.Comment.Value
   640  
   641  	// The service name is the comment up to the first space or colon
   642  	i := strings.IndexAny(name1, " :")
   643  	if i != -1 {
   644  		name1 = name1[:i]
   645  	}
   646  	i = strings.IndexAny(name2, " :")
   647  	if i != -1 {
   648  		name2 = name2[:i]
   649  	}
   650  
   651  	return name1 < name2
   652  }
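
// For example (editorial note): comments "ns1/svc1:p80 cluster IP" and
// "ns2/svc2" reduce to the names "ns1/svc1" and "ns2/svc2", so the ns1/svc1
// rule sorts first no matter what follows the space or colon.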
   653  
   654  // sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
   655  // Services get processed in, while preserving the relative ordering of related rules.
   656  func sortIPTablesRules(ruleData string) (string, error) {
   657  	dump, err := iptablestest.ParseIPTablesDump(ruleData)
   658  	if err != nil {
   659  		return "", err
   660  	}
   661  
   662  	// Sort tables
   663  	sort.Slice(dump.Tables, func(i, j int) bool {
   664  		return dump.Tables[i].Name < dump.Tables[j].Name
   665  	})
   666  
   667  	// Sort chains
   668  	for t := range dump.Tables {
   669  		table := &dump.Tables[t]
   670  		sort.Slice(table.Chains, func(i, j int) bool {
   671  			switch {
   672  			case table.Chains[i].Name == kubeNodePortsChain:
   673  				// KUBE-NODEPORTS comes before anything
   674  				return true
   675  			case table.Chains[j].Name == kubeNodePortsChain:
   676  				// anything goes after KUBE-NODEPORTS
   677  				return false
   678  			case table.Chains[i].Name == kubeServicesChain:
   679  				// KUBE-SERVICES comes before anything (except KUBE-NODEPORTS)
   680  				return true
   681  			case table.Chains[j].Name == kubeServicesChain:
   682  				// anything (except KUBE-NODEPORTS) goes after KUBE-SERVICES
   683  				return false
   684  			case strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && !strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
   685  				// KUBE-* comes before non-KUBE-*
   686  				return true
   687  			case !strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
   688  				// non-KUBE-* goes after KUBE-*
   689  				return false
   690  			default:
   691  				// We have two KUBE-* chains or two non-KUBE-* chains; either
   692  				// way they sort alphabetically
   693  				return table.Chains[i].Name < table.Chains[j].Name
   694  			}
   695  		})
   696  	}
   697  
   698  	// Sort KUBE-NODEPORTS chains by service name
   699  	chain, _ := dump.GetChain(utiliptables.TableFilter, kubeNodePortsChain)
   700  	if chain != nil {
   701  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   702  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   703  		})
   704  	}
   705  	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeNodePortsChain)
   706  	if chain != nil {
   707  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   708  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   709  		})
   710  	}
   711  
   712  	// Sort KUBE-SERVICES chains by service name (but keeping the "must be the last
   713  	// rule" rule in the "nat" table's KUBE-SERVICES chain last).
   714  	chain, _ = dump.GetChain(utiliptables.TableFilter, kubeServicesChain)
   715  	if chain != nil {
   716  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   717  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   718  		})
   719  	}
   720  	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
   721  	if chain != nil {
   722  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   723  			if chain.Rules[i].Comment != nil && strings.Contains(chain.Rules[i].Comment.Value, "must be the last rule") {
   724  				return false
   725  			} else if chain.Rules[j].Comment != nil && strings.Contains(chain.Rules[j].Comment.Value, "must be the last rule") {
   726  				return true
   727  			}
   728  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   729  		})
   730  	}
   731  
   732  	return dump.String(), nil
   733  }
   734  
   735  func TestSortIPTablesRules(t *testing.T) {
   736  	for _, tc := range []struct {
   737  		name   string
   738  		input  string
   739  		output string
   740  		error  string
   741  	}{
   742  		{
   743  			name: "basic test using each match type",
   744  			input: dedent.Dedent(`
   745  				*filter
   746  				:KUBE-SERVICES - [0:0]
   747  				:KUBE-EXTERNAL-SERVICES - [0:0]
   748  				:KUBE-FIREWALL - [0:0]
   749  				:KUBE-FORWARD - [0:0]
   750  				:KUBE-NODEPORTS - [0:0]
   751  				:KUBE-PROXY-FIREWALL - [0:0]
   752  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   753  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
   754  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
   755  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
   756  				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
   757  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   758  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   759  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   760  				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
   761  				COMMIT
   762  				*nat
   763  				:KUBE-SERVICES - [0:0]
   764  				:KUBE-NODEPORTS - [0:0]
   765  				:KUBE-POSTROUTING - [0:0]
   766  				:KUBE-MARK-MASQ - [0:0]
   767  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   768  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   769  				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
   770  				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
   771  				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
   772  				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
   773  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
   774  				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
   775  				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
   776  				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
   777  				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
   778  				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
   779  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   780  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   781  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   782  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   783  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   784  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   785  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   786  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   787  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   788  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   789  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   790  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
   791  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   792  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
   793  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
   794  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
   795  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   796  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to s2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
   797  				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   798  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   799  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
   800  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   801  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
   802  				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
   803  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
   804  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   805  				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
   806  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
   807  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
   808  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
   809  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
   810  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   811  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   812  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   813  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   814  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
   815  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
   816  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
   817  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
   818  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
   819  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
   820  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   821  				COMMIT
   822  				`),
   823  			output: dedent.Dedent(`
   824  				*filter
   825  				:KUBE-NODEPORTS - [0:0]
   826  				:KUBE-SERVICES - [0:0]
   827  				:KUBE-EXTERNAL-SERVICES - [0:0]
   828  				:KUBE-FIREWALL - [0:0]
   829  				:KUBE-FORWARD - [0:0]
   830  				:KUBE-PROXY-FIREWALL - [0:0]
   831  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   832  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
   833  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
   834  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
   835  				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
   836  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   837  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   838  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   839  				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
   840  				COMMIT
   841  				*nat
   842  				:KUBE-NODEPORTS - [0:0]
   843  				:KUBE-SERVICES - [0:0]
   844  				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
   845  				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
   846  				:KUBE-MARK-MASQ - [0:0]
   847  				:KUBE-POSTROUTING - [0:0]
   848  				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
   849  				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
   850  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
   851  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   852  				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
   853  				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
   854  				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
   855  				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
   856  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   857  				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
   858  				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   859  				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
   860  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   861  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   862  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   863  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
   864  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
   865  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   866  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   867  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   868  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   869  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
   870  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   871  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
   872  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   873  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to s2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
   874  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   875  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   876  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   877  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   878  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
   879  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
   880  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
   881  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
   882  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
   883  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
   884  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   885  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   886  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
   887  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
   888  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   889  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   890  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
   891  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
   892  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   893  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
   894  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   895  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
   896  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
   897  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   898  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   899  				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
   900  				COMMIT
   901  				`),
   902  		},
   903  		{
   904  			name: "extra tables",
   905  			input: dedent.Dedent(`
   906  				*filter
   907  				:KUBE-SERVICES - [0:0]
   908  				:KUBE-EXTERNAL-SERVICES - [0:0]
   909  				:KUBE-FORWARD - [0:0]
   910  				:KUBE-NODEPORTS - [0:0]
   911  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   912  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   913  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   914  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   915  				COMMIT
   916  				*nat
   917  				:KUBE-SERVICES - [0:0]
   918  				:KUBE-EXTERNAL-SERVICES - [0:0]
   919  				:KUBE-FORWARD - [0:0]
   920  				:KUBE-NODEPORTS - [0:0]
   921  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   922  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   923  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   924  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   925  				COMMIT
   926  				*mangle
   927  				:KUBE-SERVICES - [0:0]
   928  				:KUBE-EXTERNAL-SERVICES - [0:0]
   929  				:KUBE-FORWARD - [0:0]
   930  				:KUBE-NODEPORTS - [0:0]
   931  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   932  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   933  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   934  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   935  				COMMIT
   936  				`),
   937  			output: dedent.Dedent(`
   938  				*filter
   939  				:KUBE-NODEPORTS - [0:0]
   940  				:KUBE-SERVICES - [0:0]
   941  				:KUBE-EXTERNAL-SERVICES - [0:0]
   942  				:KUBE-FORWARD - [0:0]
   943  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   944  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   945  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   946  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   947  				COMMIT
   948  				*mangle
   949  				:KUBE-NODEPORTS - [0:0]
   950  				:KUBE-SERVICES - [0:0]
   951  				:KUBE-EXTERNAL-SERVICES - [0:0]
   952  				:KUBE-FORWARD - [0:0]
   953  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   954  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   955  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   956  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   957  				COMMIT
   958  				*nat
   959  				:KUBE-NODEPORTS - [0:0]
   960  				:KUBE-SERVICES - [0:0]
   961  				:KUBE-EXTERNAL-SERVICES - [0:0]
   962  				:KUBE-FORWARD - [0:0]
   963  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   964  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   965  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   966  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   967  				COMMIT
   968  				`),
   969  		},
   970  		{
   971  			name: "correctly match same service name in different styles of comments",
   972  			input: dedent.Dedent(`
   973  				*filter
   974  				COMMIT
   975  				*nat
   976  				:KUBE-SERVICES - [0:0]
   977  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
   978  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
   979  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
   980  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
   981  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
   982  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
   983  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
   984  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
   985  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
   986  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
   987  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
   988  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
   989  				COMMIT
   990  				`),
   991  			output: dedent.Dedent(`
   992  				*filter
   993  				COMMIT
   994  				*nat
   995  				:KUBE-SERVICES - [0:0]
   996  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
   997  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
   998  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
   999  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1000  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1001  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1002  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
  1003  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
  1004  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
  1005  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
  1006  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
  1007  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
  1008  				COMMIT
  1009  				`),
  1010  		},
  1011  		{
  1012  			name: "unexpected junk lines are preserved",
  1013  			input: dedent.Dedent(`
  1014  				*filter
  1015  				COMMIT
  1016  				*nat
  1017  				:KUBE-SERVICES - [0:0]
  1018  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1019  				:KUBE-AAAAA - [0:0]
  1020  				:KUBE-ZZZZZ - [0:0]
  1021  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1022  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1023  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1024  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1025  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1026  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1027  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1028  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1029  				COMMIT
  1030  				`),
  1031  			output: dedent.Dedent(`
  1032  				*filter
  1033  				COMMIT
  1034  				*nat
  1035  				:KUBE-SERVICES - [0:0]
  1036  				:KUBE-AAAAA - [0:0]
  1037  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1038  				:KUBE-ZZZZZ - [0:0]
  1039  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1040  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1041  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1042  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1043  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1044  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1045  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1046  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1047  				COMMIT
  1048  				`),
  1049  		},
  1050  	} {
  1051  		t.Run(tc.name, func(t *testing.T) {
  1052  			out, err := sortIPTablesRules(tc.input)
  1053  			if err == nil {
  1054  				if tc.error != "" {
  1055  					t.Errorf("unexpectedly did not get error")
  1056  				} else {
  1057  					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
  1058  				}
  1059  			} else {
  1060  				if tc.error == "" {
  1061  					t.Errorf("got unexpected error: %v", err)
  1062  				} else if !strings.HasPrefix(err.Error(), tc.error) {
  1063  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
  1064  				}
  1065  			}
  1066  		})
  1067  	}
  1068  }
  1069  
  1070  // getLine returns the line number of the caller, if possible.  This is useful in
   1071  // tests with a large number of cases - when something goes wrong you can more
   1072  // easily find which case failed.
  1073  func getLine() int {
  1074  	_, _, line, ok := stdruntime.Caller(1)
  1075  	if ok {
  1076  		return line
  1077  	}
  1078  	return 0
  1079  }
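         // For example, callers like runPacketFlowTests(t, getLine(), ...) record the
         // call site so that a failure can report "(from line N)" and point back at
         // the table of cases that produced it.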
  1080  
  1081  // assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
  1082  // expected, ignoring irrelevant ordering differences. By default this also checks the
   1083  // rules for consistency (e.g., no jumps to chains that aren't defined), but that can be
  1084  // disabled by passing false for checkConsistency if you are passing a partial set of rules.
  1085  func assertIPTablesRulesEqual(t *testing.T, line int, checkConsistency bool, expected, result string) {
  1086  	expected = strings.TrimLeft(expected, " \t\n")
  1087  
  1088  	result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n"))
  1089  	if err != nil {
  1090  		t.Fatalf("%s", err)
  1091  	}
  1092  
  1093  	lineStr := ""
  1094  	if line != 0 {
  1095  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1096  	}
  1097  	if diff := cmp.Diff(expected, result); diff != "" {
  1098  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1099  	}
  1100  
  1101  	if checkConsistency {
  1102  		err = checkIPTablesRuleJumps(expected)
  1103  		if err != nil {
  1104  			t.Fatalf("%s%s", err, lineStr)
  1105  		}
  1106  	}
  1107  }
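         // Typical usage (as in TestOverallIPTablesRules below) passes the proxier's
         // freshly-generated ruleset as result:
         //
         //	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())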
  1108  
  1109  // assertIPTablesChainEqual asserts that the indicated chain in the indicated table in
  1110  // result contains exactly the rules in expected (in that order).
  1111  func assertIPTablesChainEqual(t *testing.T, line int, table utiliptables.Table, chain utiliptables.Chain, expected, result string) {
  1112  	expected = strings.TrimLeft(expected, " \t\n")
  1113  
  1114  	dump, err := iptablestest.ParseIPTablesDump(strings.TrimLeft(result, " \t\n"))
  1115  	if err != nil {
  1116  		t.Fatalf("%s", err)
  1117  	}
  1118  
  1119  	result = ""
  1120  	if ch, _ := dump.GetChain(table, chain); ch != nil {
  1121  		for _, rule := range ch.Rules {
  1122  			result += rule.Raw + "\n"
  1123  		}
  1124  	}
  1125  
  1126  	lineStr := ""
  1127  	if line != 0 {
  1128  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1129  	}
  1130  	if diff := cmp.Diff(expected, result); diff != "" {
  1131  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1132  	}
  1133  }
  1134  
  1135  // addressMatches helps test whether an iptables rule such as "! -s 192.168.0.0/16" matches
  1136  // ipStr. address.Value is either an IP address ("1.2.3.4") or a CIDR string
  1137  // ("1.2.3.0/24").
  1138  func addressMatches(t *testing.T, address *iptablestest.IPTablesValue, ipStr string) bool {
  1139  	ip := netutils.ParseIPSloppy(ipStr)
  1140  	if ip == nil {
  1141  		t.Fatalf("Bad IP in test case: %s", ipStr)
  1142  	}
  1143  
  1144  	var matches bool
  1145  	if strings.Contains(address.Value, "/") {
  1146  		_, cidr, err := netutils.ParseCIDRSloppy(address.Value)
  1147  		if err != nil {
  1148  			t.Errorf("Bad CIDR in kube-proxy output: %v", err)
  1149  		}
  1150  		matches = cidr.Contains(ip)
  1151  	} else {
  1152  		ip2 := netutils.ParseIPSloppy(address.Value)
  1153  		if ip2 == nil {
  1154  			t.Errorf("Bad IP/CIDR in kube-proxy output: %s", address.Value)
  1155  		}
  1156  		matches = ip.Equal(ip2)
  1157  	}
  1158  	return (!address.Negated && matches) || (address.Negated && !matches)
  1159  }
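         // For example, given the fragment "! -s 192.168.0.0/16" (a negated CIDR),
         // addressMatches reports true for "10.0.0.1" (outside the CIDR) and false
         // for "192.168.0.1"; without the "!", those results would be inverted.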
  1160  
  1161  // iptablesTracer holds data used while virtually tracing a packet through a set of
  1162  // iptables rules
  1163  type iptablesTracer struct {
  1164  	ipt      *iptablestest.FakeIPTables
  1165  	localIPs sets.Set[string]
  1166  	t        *testing.T
  1167  
  1168  	// matches accumulates the list of rules that were matched, for debugging purposes.
  1169  	matches []string
  1170  
  1171  	// outputs accumulates the list of matched terminal rule targets (endpoint
  1172  	// IP:ports, or a special target like "REJECT") and is eventually used to generate
  1173  	// the return value of tracePacket.
  1174  	outputs []string
  1175  
  1176  	// markMasq tracks whether the packet has been marked for masquerading
  1177  	markMasq bool
  1178  }
  1179  
  1180  // newIPTablesTracer creates an iptablesTracer. nodeIPs are the IPs to treat as local
  1181  // node IPs (for determining whether rules with "--src-type LOCAL" or "--dst-type LOCAL"
  1182  // match).
  1183  func newIPTablesTracer(t *testing.T, ipt *iptablestest.FakeIPTables, nodeIPs []string) *iptablesTracer {
  1184  	localIPs := sets.New("127.0.0.1", "::1")
  1185  	localIPs.Insert(nodeIPs...)
  1186  
  1187  	return &iptablesTracer{
  1188  		ipt:      ipt,
  1189  		localIPs: localIPs,
  1190  		t:        t,
  1191  	}
  1192  }
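         // For example, newIPTablesTracer(t, ipt, testNodeIPs) treats 127.0.0.1, ::1,
         // and each of the test node and external IPs as LOCAL when evaluating
         // "--src-type LOCAL" and "--dst-type LOCAL" matches.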
  1193  
   1194  // ruleMatches checks if the given iptables rule matches (at least probabilistically) a
   1195  // packet with the given sourceIP, protocol, destIP, and destPort.
  1196  func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, protocol, destIP, destPort string) bool {
  1197  	// The sub-rules within an iptables rule are ANDed together, so the rule only
   1198  	// matches if all of them match. So go through the sub-rules, and if any of them
  1199  	// DON'T match, then fail.
  1200  
  1201  	if rule.SourceAddress != nil && !addressMatches(tracer.t, rule.SourceAddress, sourceIP) {
  1202  		return false
  1203  	}
  1204  	if rule.SourceType != nil {
  1205  		addrtype := "not-matched"
  1206  		if tracer.localIPs.Has(sourceIP) {
  1207  			addrtype = "LOCAL"
  1208  		}
  1209  		if !rule.SourceType.Matches(addrtype) {
  1210  			return false
  1211  		}
  1212  	}
  1213  
  1214  	if rule.Protocol != nil && !rule.Protocol.Matches(protocol) {
  1215  		return false
  1216  	}
  1217  
  1218  	if rule.DestinationAddress != nil && !addressMatches(tracer.t, rule.DestinationAddress, destIP) {
  1219  		return false
  1220  	}
  1221  	if rule.DestinationType != nil {
  1222  		addrtype := "not-matched"
  1223  		if tracer.localIPs.Has(destIP) {
  1224  			addrtype = "LOCAL"
  1225  		}
  1226  		if !rule.DestinationType.Matches(addrtype) {
  1227  			return false
  1228  		}
  1229  	}
  1230  	if rule.DestinationPort != nil && !rule.DestinationPort.Matches(destPort) {
  1231  		return false
  1232  	}
  1233  
  1234  	// Any rule that checks for past state/history does not match
  1235  	if rule.AffinityCheck != nil || rule.MarkCheck != nil || rule.CTStateCheck != nil {
  1236  		return false
  1237  	}
  1238  
  1239  	// Anything else is assumed to match
  1240  	return true
  1241  }
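         // For example, "-m tcp -p tcp -d 172.30.0.41 --dport 80 -j X" matches only a
         // tcp packet destined for 172.30.0.41:80; if any one of those checks fails,
         // the rule as a whole does not match.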
  1242  
  1243  // runChain runs the given packet through the rules in the given table and chain, updating
  1244  // tracer's internal state accordingly. It returns true if it hits a terminal action.
  1245  func (tracer *iptablesTracer) runChain(table utiliptables.Table, chain utiliptables.Chain, sourceIP, protocol, destIP, destPort string) bool {
  1246  	c, _ := tracer.ipt.Dump.GetChain(table, chain)
  1247  	if c == nil {
  1248  		return false
  1249  	}
  1250  
  1251  	for _, rule := range c.Rules {
  1252  		if rule.Jump == nil {
  1253  			continue
  1254  		}
  1255  
  1256  		if !tracer.ruleMatches(rule, sourceIP, protocol, destIP, destPort) {
  1257  			continue
  1258  		}
  1259  		// record the matched rule for debugging purposes
  1260  		tracer.matches = append(tracer.matches, rule.Raw)
  1261  
  1262  		switch rule.Jump.Value {
  1263  		case "KUBE-MARK-MASQ":
  1264  			tracer.markMasq = true
  1265  			continue
  1266  
  1267  		case "ACCEPT", "REJECT", "DROP":
  1268  			// (only valid in filter)
  1269  			tracer.outputs = append(tracer.outputs, rule.Jump.Value)
  1270  			return true
  1271  
  1272  		case "DNAT":
  1273  			// (only valid in nat)
  1274  			tracer.outputs = append(tracer.outputs, rule.DNATDestination.Value)
  1275  			return true
  1276  
  1277  		default:
  1278  			// We got a "-j KUBE-SOMETHING", so process that chain
  1279  			terminated := tracer.runChain(table, utiliptables.Chain(rule.Jump.Value), sourceIP, protocol, destIP, destPort)
  1280  
  1281  			// If the subchain hit a terminal rule AND the rule that sent us
  1282  			// to that chain was non-probabilistic, then this chain terminates
  1283  			// as well. But if we went there because of a --probability rule,
  1284  			// then we want to keep accumulating further matches against this
  1285  			// chain.
  1286  			if terminated && rule.Probability == nil {
  1287  				return true
  1288  			}
  1289  		}
  1290  	}
  1291  
  1292  	return false
  1293  }
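         // Note that because a "--probability" jump does not terminate the calling
         // chain, tracing a service with two endpoints (a 0.5-probability jump
         // followed by an unconditional one) accumulates both endpoints, producing
         // output like "10.180.0.4:80, 10.180.0.5:80" in TestTracePackets below.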
  1294  
  1295  // tracePacket determines what would happen to a packet with the given sourceIP, protocol,
  1296  // destIP, and destPort, given the indicated iptables ruleData. nodeIP is the local node
  1297  // IP (for rules matching "LOCAL"). (The protocol value should be lowercase as in iptables
  1298  // rules, not uppercase as in corev1.)
  1299  //
   1300  // The return values are: a slice of matched rules (for debugging), the final packet
  1301  // destinations (a comma-separated list of IPs, or one of the special targets "ACCEPT",
  1302  // "DROP", or "REJECT"), and whether the packet would be masqueraded.
  1303  func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, protocol, destIP, destPort string, nodeIPs []string) ([]string, string, bool) {
  1304  	tracer := newIPTablesTracer(t, ipt, nodeIPs)
  1305  
  1306  	// nat:PREROUTING goes first
  1307  	tracer.runChain(utiliptables.TableNAT, utiliptables.ChainPrerouting, sourceIP, protocol, destIP, destPort)
  1308  
  1309  	// After the PREROUTING rules run, pending DNATs are processed (which would affect
  1310  	// the destination IP that later rules match against).
  1311  	if len(tracer.outputs) != 0 {
  1312  		destIP = strings.Split(tracer.outputs[0], ":")[0]
  1313  	}
  1314  
   1315  	// Now the filter rules get run; exactly which ones run depends on whether this
   1316  	// is an inbound, outbound, or intra-host packet, which we don't know. So we just
   1317  	// run the interesting chains manually. (Theoretically this could cause conflicts
   1318  	// in the future, in which case we'd have to do something more complicated.)
  1319  	tracer.runChain(utiliptables.TableFilter, kubeServicesChain, sourceIP, protocol, destIP, destPort)
  1320  	tracer.runChain(utiliptables.TableFilter, kubeExternalServicesChain, sourceIP, protocol, destIP, destPort)
  1321  	tracer.runChain(utiliptables.TableFilter, kubeNodePortsChain, sourceIP, protocol, destIP, destPort)
  1322  	tracer.runChain(utiliptables.TableFilter, kubeProxyFirewallChain, sourceIP, protocol, destIP, destPort)
  1323  
  1324  	// Finally, the nat:POSTROUTING rules run, but the only interesting thing that
  1325  	// happens there is that the masquerade mark gets turned into actual masquerading.
  1326  
  1327  	return tracer.matches, strings.Join(tracer.outputs, ", "), tracer.markMasq
  1328  }
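         // A minimal sketch of a direct call, with illustrative addresses drawn from
         // the test conventions (runPacketFlowTests wraps this same call for
         // table-driven cases):
         //
         //	matches, output, masq := tracePacket(t, ipt, "10.0.0.2", "tcp", "172.30.0.41", "80", testNodeIPs)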
  1329  
  1330  type packetFlowTest struct {
  1331  	name     string
  1332  	sourceIP string
  1333  	protocol v1.Protocol
  1334  	destIP   string
  1335  	destPort int
  1336  	output   string
  1337  	masq     bool
  1338  }
  1339  
  1340  func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, nodeIPs []string, testCases []packetFlowTest) {
  1341  	lineStr := ""
  1342  	if line != 0 {
  1343  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1344  	}
  1345  	for _, tc := range testCases {
  1346  		t.Run(tc.name, func(t *testing.T) {
  1347  			protocol := strings.ToLower(string(tc.protocol))
  1348  			if protocol == "" {
  1349  				protocol = "tcp"
  1350  			}
  1351  			matches, output, masq := tracePacket(t, ipt, tc.sourceIP, protocol, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIPs)
  1352  			var errors []string
  1353  			if output != tc.output {
  1354  				errors = append(errors, fmt.Sprintf("wrong output: expected %q got %q", tc.output, output))
  1355  			}
  1356  			if masq != tc.masq {
  1357  				errors = append(errors, fmt.Sprintf("wrong masq: expected %v got %v", tc.masq, masq))
  1358  			}
  1359  			if errors != nil {
  1360  				t.Errorf("Test %q of a %s packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n",
  1361  					tc.name, protocol, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n"))
  1362  			}
  1363  		})
  1364  	}
  1365  }
  1366  
   1367  // This tests tracePacket against static data, just to make sure we match things in the
  1368  // way we expect to.
  1369  func TestTracePackets(t *testing.T) {
  1370  	rules := dedent.Dedent(`
  1371  		*filter
  1372  		:INPUT - [0:0]
  1373  		:FORWARD - [0:0]
  1374  		:OUTPUT - [0:0]
  1375  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1376  		:KUBE-FIREWALL - [0:0]
  1377  		:KUBE-FORWARD - [0:0]
  1378  		:KUBE-NODEPORTS - [0:0]
  1379  		:KUBE-SERVICES - [0:0]
  1380  		:KUBE-PROXY-FIREWALL - [0:0]
  1381  		-A INPUT -m comment --comment kubernetes health check service ports -j KUBE-NODEPORTS
  1382  		-A INPUT -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1383  		-A FORWARD -m comment --comment kubernetes forwarding rules -j KUBE-FORWARD
  1384  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1385  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1386  		-A OUTPUT -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1387  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1388  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1389  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1390  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1391  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1392  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1393  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1394  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1395  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1396  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1397  		COMMIT
  1398  		*nat
  1399  		:PREROUTING - [0:0]
  1400  		:INPUT - [0:0]
  1401  		:OUTPUT - [0:0]
  1402  		:POSTROUTING - [0:0]
  1403  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1404  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1405  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1406  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1407  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1408  		:KUBE-MARK-MASQ - [0:0]
  1409  		:KUBE-NODEPORTS - [0:0]
  1410  		:KUBE-POSTROUTING - [0:0]
  1411  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1412  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1413  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1414  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1415  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1416  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1417  		:KUBE-SERVICES - [0:0]
  1418  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1419  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1420  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1421  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1422  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1423  		-A PREROUTING -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1424  		-A OUTPUT -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1425  		-A POSTROUTING -m comment --comment kubernetes postrouting rules -j KUBE-POSTROUTING
  1426  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1427  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1428  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1429  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1430  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1431  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1432  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1433  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1434  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1435  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1436  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1437  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1438  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1439  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1440  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1441  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1442  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1443  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1444  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1445  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1446  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1447  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1448  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1449  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1450  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1451  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1452  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1453  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1454  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1455  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1456  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1457  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1458  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1459  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1460  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1461  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1462  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1463  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1464  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1465  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1466  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1467  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1468  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1469  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1470  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1471  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1472  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  1473  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1474  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  1475  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1476  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  1477  		COMMIT
  1478  		`)
  1479  
  1480  	ipt := iptablestest.NewFake()
  1481  	err := ipt.RestoreAll([]byte(rules), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
  1482  	if err != nil {
  1483  		t.Fatalf("Restore of test data failed: %v", err)
  1484  	}
  1485  
  1486  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1487  		{
  1488  			name:     "no match",
  1489  			sourceIP: "10.0.0.2",
  1490  			destIP:   "10.0.0.3",
  1491  			destPort: 80,
  1492  			output:   "",
  1493  		},
  1494  		{
  1495  			name:     "single endpoint",
  1496  			sourceIP: "10.0.0.2",
  1497  			destIP:   "172.30.0.41",
  1498  			destPort: 80,
  1499  			output:   "10.180.0.1:80",
  1500  		},
  1501  		{
  1502  			name:     "multiple endpoints",
  1503  			sourceIP: "10.0.0.2",
  1504  			destIP:   "172.30.0.44",
  1505  			destPort: 80,
  1506  			output:   "10.180.0.4:80, 10.180.0.5:80",
  1507  		},
  1508  		{
  1509  			name:     "LOCAL, KUBE-MARK-MASQ",
  1510  			sourceIP: testNodeIP,
  1511  			destIP:   "192.168.99.22",
  1512  			destPort: 80,
  1513  			output:   "10.180.0.2:80",
  1514  			masq:     true,
  1515  		},
  1516  		{
  1517  			name:     "DROP",
  1518  			sourceIP: testExternalClient,
  1519  			destIP:   "192.168.99.22",
  1520  			destPort: 80,
  1521  			output:   "DROP",
  1522  		},
  1523  		{
  1524  			name:     "ACCEPT (NodePortHealthCheck)",
  1525  			sourceIP: testNodeIP,
  1526  			destIP:   testNodeIP,
  1527  			destPort: 30000,
  1528  			output:   "ACCEPT",
  1529  		},
  1530  		{
  1531  			name:     "REJECT",
  1532  			sourceIP: "10.0.0.2",
  1533  			destIP:   "172.30.0.46",
  1534  			destPort: 80,
  1535  			output:   "REJECT",
  1536  		},
  1537  	})
  1538  }
  1539  
  1540  // TestOverallIPTablesRules creates a variety of services and verifies that the generated
  1541  // rules are exactly as expected.
  1542  func TestOverallIPTablesRules(t *testing.T) {
  1543  	ipt := iptablestest.NewFake()
  1544  	fp := NewFakeProxier(ipt)
  1545  	metrics.RegisterMetrics()
  1546  
  1547  	makeServiceMap(fp,
  1548  		// create ClusterIP service
  1549  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  1550  			svc.Spec.ClusterIP = "172.30.0.41"
  1551  			svc.Spec.Ports = []v1.ServicePort{{
  1552  				Name:     "p80",
  1553  				Port:     80,
  1554  				Protocol: v1.ProtocolTCP,
  1555  			}}
  1556  		}),
  1557  		// create LoadBalancer service with Local traffic policy
  1558  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  1559  			svc.Spec.Type = "LoadBalancer"
  1560  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1561  			svc.Spec.ClusterIP = "172.30.0.42"
  1562  			svc.Spec.Ports = []v1.ServicePort{{
  1563  				Name:     "p80",
  1564  				Port:     80,
  1565  				Protocol: v1.ProtocolTCP,
  1566  				NodePort: 3001,
  1567  			}}
  1568  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1569  				IP: "1.2.3.4",
  1570  			}}
  1571  			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
  1572  			svc.Spec.HealthCheckNodePort = 30000
  1573  		}),
  1574  		// create NodePort service
  1575  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  1576  			svc.Spec.Type = "NodePort"
  1577  			svc.Spec.ClusterIP = "172.30.0.43"
  1578  			svc.Spec.Ports = []v1.ServicePort{{
  1579  				Name:     "p80",
  1580  				Port:     80,
  1581  				Protocol: v1.ProtocolTCP,
  1582  				NodePort: 3003,
  1583  			}}
  1584  		}),
  1585  		// create ExternalIP service
  1586  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  1587  			svc.Spec.Type = "NodePort"
  1588  			svc.Spec.ClusterIP = "172.30.0.44"
  1589  			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
  1590  			svc.Spec.Ports = []v1.ServicePort{{
  1591  				Name:       "p80",
  1592  				Port:       80,
  1593  				Protocol:   v1.ProtocolTCP,
  1594  				TargetPort: intstr.FromInt32(80),
  1595  			}}
  1596  		}),
  1597  		// create LoadBalancer service with Cluster traffic policy, source ranges,
  1598  		// and session affinity
  1599  		makeTestService("ns5", "svc5", func(svc *v1.Service) {
  1600  			svc.Spec.Type = "LoadBalancer"
  1601  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  1602  			svc.Spec.ClusterIP = "172.30.0.45"
  1603  			svc.Spec.Ports = []v1.ServicePort{{
  1604  				Name:     "p80",
  1605  				Port:     80,
  1606  				Protocol: v1.ProtocolTCP,
  1607  				NodePort: 3002,
  1608  			}}
  1609  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1610  				IP: "5.6.7.8",
  1611  			}}
  1612  			svc.Spec.HealthCheckNodePort = 30000
   1613  			// Extra whitespace to ensure that an invalid value will not
   1614  			// result in a crash, for backward compatibility.
  1615  			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
  1616  
  1617  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  1618  			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
  1619  				ClientIP: &v1.ClientIPConfig{
  1620  					TimeoutSeconds: ptr.To[int32](10800),
  1621  				},
  1622  			}
  1623  		}),
  1624  		// create ClusterIP service with no endpoints
  1625  		makeTestService("ns6", "svc6", func(svc *v1.Service) {
  1626  			svc.Spec.Type = "ClusterIP"
  1627  			svc.Spec.ClusterIP = "172.30.0.46"
  1628  			svc.Spec.Ports = []v1.ServicePort{{
  1629  				Name:       "p80",
  1630  				Port:       80,
  1631  				Protocol:   v1.ProtocolTCP,
  1632  				TargetPort: intstr.FromInt32(80),
  1633  			}}
  1634  		}),
  1635  	)
  1636  	populateEndpointSlices(fp,
  1637  		// create ClusterIP service endpoints
  1638  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  1639  			eps.AddressType = discovery.AddressTypeIPv4
  1640  			eps.Endpoints = []discovery.Endpoint{{
  1641  				Addresses: []string{"10.180.0.1"},
  1642  			}}
  1643  			eps.Ports = []discovery.EndpointPort{{
  1644  				Name:     ptr.To("p80"),
  1645  				Port:     ptr.To[int32](80),
  1646  				Protocol: ptr.To(v1.ProtocolTCP),
  1647  			}}
  1648  		}),
  1649  		// create Local LoadBalancer endpoints. Note that since we aren't setting
   1650  		// a NodeName, this endpoint will be considered non-local and ignored.
  1651  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  1652  			eps.AddressType = discovery.AddressTypeIPv4
  1653  			eps.Endpoints = []discovery.Endpoint{{
  1654  				Addresses: []string{"10.180.0.2"},
  1655  			}}
  1656  			eps.Ports = []discovery.EndpointPort{{
  1657  				Name:     ptr.To("p80"),
  1658  				Port:     ptr.To[int32](80),
  1659  				Protocol: ptr.To(v1.ProtocolTCP),
  1660  			}}
  1661  		}),
  1662  		// create NodePort service endpoints
  1663  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  1664  			eps.AddressType = discovery.AddressTypeIPv4
  1665  			eps.Endpoints = []discovery.Endpoint{{
  1666  				Addresses: []string{"10.180.0.3"},
  1667  			}}
  1668  			eps.Ports = []discovery.EndpointPort{{
  1669  				Name:     ptr.To("p80"),
  1670  				Port:     ptr.To[int32](80),
  1671  				Protocol: ptr.To(v1.ProtocolTCP),
  1672  			}}
  1673  		}),
  1674  		// create ExternalIP service endpoints
  1675  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  1676  			eps.AddressType = discovery.AddressTypeIPv4
  1677  			eps.Endpoints = []discovery.Endpoint{{
  1678  				Addresses: []string{"10.180.0.4"},
  1679  			}, {
  1680  				Addresses: []string{"10.180.0.5"},
  1681  				NodeName:  ptr.To(testHostname),
  1682  			}}
  1683  			eps.Ports = []discovery.EndpointPort{{
  1684  				Name:     ptr.To("p80"),
  1685  				Port:     ptr.To[int32](80),
  1686  				Protocol: ptr.To(v1.ProtocolTCP),
  1687  			}}
  1688  		}),
  1689  		// create Cluster LoadBalancer endpoints
  1690  		makeTestEndpointSlice("ns5", "svc5", 1, func(eps *discovery.EndpointSlice) {
  1691  			eps.AddressType = discovery.AddressTypeIPv4
  1692  			eps.Endpoints = []discovery.Endpoint{{
  1693  				Addresses: []string{"10.180.0.3"},
  1694  			}}
  1695  			eps.Ports = []discovery.EndpointPort{{
  1696  				Name:     ptr.To("p80"),
  1697  				Port:     ptr.To[int32](80),
  1698  				Protocol: ptr.To(v1.ProtocolTCP),
  1699  			}}
  1700  		}),
  1701  	)
  1702  
  1703  	fp.syncProxyRules()
  1704  
  1705  	expected := dedent.Dedent(`
  1706  		*filter
  1707  		:KUBE-NODEPORTS - [0:0]
  1708  		:KUBE-SERVICES - [0:0]
  1709  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1710  		:KUBE-FIREWALL - [0:0]
  1711  		:KUBE-FORWARD - [0:0]
  1712  		:KUBE-PROXY-FIREWALL - [0:0]
  1713  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1714  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1715  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1716  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1717  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1718  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1719  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1720  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1721  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1722  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1723  		COMMIT
  1724  		*nat
  1725  		:KUBE-NODEPORTS - [0:0]
  1726  		:KUBE-SERVICES - [0:0]
  1727  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1728  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1729  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1730  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1731  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1732  		:KUBE-MARK-MASQ - [0:0]
  1733  		:KUBE-POSTROUTING - [0:0]
  1734  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1735  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1736  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1737  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1738  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1739  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1740  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1741  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1742  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1743  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1744  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1745  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1746  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1747  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1748  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1749  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1750  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1751  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1752  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1753  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1754  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1755  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1756  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1757  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1758  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1759  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1760  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1761  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1762  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1763  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1764  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1765  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1766  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1767  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1768  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1769  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1770  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1771  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1772  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1773  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1774  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1775  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1776  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1777  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1778  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1779  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1780  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1781  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1782  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1783  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1784  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1785  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1786  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1787  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1788  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1789  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1790  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1791  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --rcheck --seconds 10800 --reap -j KUBE-SEP-I77PXRDZVX7PMWMN
  1792  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  1793  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1794  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  1795  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1796  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  1797  		COMMIT
  1798  		`)
  1799  
  1800  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  1801  
  1802  	nNatRules := countRulesFromMetric(utiliptables.TableNAT)
  1803  	expectedNatRules := countRules(utiliptables.TableNAT, fp.iptablesData.String())
  1804  
  1805  	if nNatRules != expectedNatRules {
  1806  		t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
  1807  	}
  1808  }
  1809  
  1810  // TestNoEndpointsReject tests that a service with no endpoints rejects connections to
  1811  // its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP.
  1812  func TestNoEndpointsReject(t *testing.T) {
  1813  	ipt := iptablestest.NewFake()
  1814  	fp := NewFakeProxier(ipt)
  1815  	svcIP := "172.30.0.41"
  1816  	svcPort := 80
  1817  	svcNodePort := 3001
  1818  	svcExternalIPs := "192.168.99.11"
  1819  	svcLBIP := "1.2.3.4"
  1820  	svcPortName := proxy.ServicePortName{
  1821  		NamespacedName: makeNSN("ns1", "svc1"),
  1822  		Port:           "p80",
  1823  	}
  1824  
  1825  	makeServiceMap(fp,
  1826  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  1827  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1828  			svc.Spec.ClusterIP = svcIP
  1829  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  1830  			svc.Spec.Ports = []v1.ServicePort{{
  1831  				Name:     svcPortName.Port,
  1832  				Protocol: v1.ProtocolTCP,
  1833  				Port:     int32(svcPort),
  1834  				NodePort: int32(svcNodePort),
  1835  			}}
  1836  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1837  				IP: svcLBIP,
  1838  			}}
  1839  		}),
  1840  	)
  1841  	fp.syncProxyRules()
  1842  
  1843  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1844  		{
  1845  			name:     "pod to cluster IP with no endpoints",
  1846  			sourceIP: "10.0.0.2",
  1847  			destIP:   svcIP,
  1848  			destPort: svcPort,
  1849  			output:   "REJECT",
  1850  		},
  1851  		{
  1852  			name:     "external to external IP with no endpoints",
  1853  			sourceIP: testExternalClient,
  1854  			destIP:   svcExternalIPs,
  1855  			destPort: svcPort,
  1856  			output:   "REJECT",
  1857  		},
  1858  		{
  1859  			name:     "pod to NodePort with no endpoints",
  1860  			sourceIP: "10.0.0.2",
  1861  			destIP:   testNodeIP,
  1862  			destPort: svcNodePort,
  1863  			output:   "REJECT",
  1864  		},
  1865  		{
  1866  			name:     "external to NodePort with no endpoints",
  1867  			sourceIP: testExternalClient,
  1868  			destIP:   testNodeIP,
  1869  			destPort: svcNodePort,
  1870  			output:   "REJECT",
  1871  		},
  1872  		{
  1873  			name:     "pod to LoadBalancer IP with no endpoints",
  1874  			sourceIP: "10.0.0.2",
  1875  			destIP:   svcLBIP,
  1876  			destPort: svcPort,
  1877  			output:   "REJECT",
  1878  		},
  1879  		{
  1880  			name:     "external to LoadBalancer IP with no endpoints",
  1881  			sourceIP: testExternalClient,
  1882  			destIP:   svcLBIP,
  1883  			destPort: svcPort,
  1884  			output:   "REJECT",
  1885  		},
  1886  	})
  1887  }
  1888  
   1889  // TestClusterIPGeneral tests various basic features of a ClusterIP service.
  1890  func TestClusterIPGeneral(t *testing.T) {
  1891  	ipt := iptablestest.NewFake()
  1892  	fp := NewFakeProxier(ipt)
  1893  
  1894  	makeServiceMap(fp,
  1895  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  1896  			svc.Spec.ClusterIP = "172.30.0.41"
  1897  			svc.Spec.Ports = []v1.ServicePort{{
  1898  				Name:     "http",
  1899  				Port:     80,
  1900  				Protocol: v1.ProtocolTCP,
  1901  			}}
  1902  		}),
  1903  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  1904  			svc.Spec.ClusterIP = "172.30.0.42"
  1905  			svc.Spec.Ports = []v1.ServicePort{
  1906  				{
  1907  					Name:     "http",
  1908  					Port:     80,
  1909  					Protocol: v1.ProtocolTCP,
  1910  				},
  1911  				{
  1912  					Name:       "https",
  1913  					Port:       443,
  1914  					Protocol:   v1.ProtocolTCP,
  1915  					TargetPort: intstr.FromInt32(8443),
  1916  				},
  1917  				{
  1918  					Name:     "dns-udp",
  1919  					Port:     53,
  1920  					Protocol: v1.ProtocolUDP,
  1921  				},
  1922  				{
  1923  					Name:     "dns-tcp",
  1924  					Port:     53,
  1925  					Protocol: v1.ProtocolTCP,
  1926  					// We use TargetPort on TCP but not UDP/SCTP to
  1927  					// help disambiguate the output.
  1928  					TargetPort: intstr.FromInt32(5353),
  1929  				},
  1930  				{
  1931  					Name:     "dns-sctp",
  1932  					Port:     53,
  1933  					Protocol: v1.ProtocolSCTP,
  1934  				},
  1935  			}
  1936  		}),
  1937  	)
  1938  
  1939  	populateEndpointSlices(fp,
  1940  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  1941  			eps.AddressType = discovery.AddressTypeIPv4
  1942  			eps.Endpoints = []discovery.Endpoint{{
  1943  				Addresses: []string{"10.180.0.1"},
  1944  				NodeName:  ptr.To(testHostname),
  1945  			}}
  1946  			eps.Ports = []discovery.EndpointPort{{
  1947  				Name:     ptr.To("http"),
  1948  				Port:     ptr.To[int32](80),
  1949  				Protocol: ptr.To(v1.ProtocolTCP),
  1950  			}}
  1951  		}),
  1952  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  1953  			eps.AddressType = discovery.AddressTypeIPv4
  1954  			eps.Endpoints = []discovery.Endpoint{
  1955  				{
  1956  					Addresses: []string{"10.180.0.1"},
  1957  					NodeName:  ptr.To(testHostname),
  1958  				},
  1959  				{
  1960  					Addresses: []string{"10.180.2.1"},
  1961  					NodeName:  ptr.To("host2"),
  1962  				},
  1963  			}
  1964  			eps.Ports = []discovery.EndpointPort{
  1965  				{
  1966  					Name:     ptr.To("http"),
  1967  					Port:     ptr.To[int32](80),
  1968  					Protocol: ptr.To(v1.ProtocolTCP),
  1969  				},
  1970  				{
  1971  					Name:     ptr.To("https"),
  1972  					Port:     ptr.To[int32](8443),
  1973  					Protocol: ptr.To(v1.ProtocolTCP),
  1974  				},
  1975  				{
  1976  					Name:     ptr.To("dns-udp"),
  1977  					Port:     ptr.To[int32](53),
  1978  					Protocol: ptr.To(v1.ProtocolUDP),
  1979  				},
  1980  				{
  1981  					Name:     ptr.To("dns-tcp"),
  1982  					Port:     ptr.To[int32](5353),
  1983  					Protocol: ptr.To(v1.ProtocolTCP),
  1984  				},
  1985  				{
  1986  					Name:     ptr.To("dns-sctp"),
  1987  					Port:     ptr.To[int32](53),
  1988  					Protocol: ptr.To(v1.ProtocolSCTP),
  1989  				},
  1990  			}
  1991  		}),
  1992  	)
  1993  
  1994  	fp.syncProxyRules()
  1995  
  1996  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1997  		{
  1998  			name:     "simple clusterIP",
  1999  			sourceIP: "10.180.0.2",
  2000  			destIP:   "172.30.0.41",
  2001  			destPort: 80,
  2002  			output:   "10.180.0.1:80",
  2003  			masq:     false,
  2004  		},
  2005  		{
  2006  			name:     "hairpin to cluster IP",
  2007  			sourceIP: "10.180.0.1",
  2008  			destIP:   "172.30.0.41",
  2009  			destPort: 80,
  2010  			output:   "10.180.0.1:80",
  2011  			masq:     true,
  2012  		},
  2013  		{
  2014  			name:     "clusterIP with multiple endpoints",
  2015  			sourceIP: "10.180.0.2",
  2016  			destIP:   "172.30.0.42",
  2017  			destPort: 80,
  2018  			output:   "10.180.0.1:80, 10.180.2.1:80",
  2019  			masq:     false,
  2020  		},
  2021  		{
  2022  			name:     "clusterIP with TargetPort",
  2023  			sourceIP: "10.180.0.2",
  2024  			destIP:   "172.30.0.42",
  2025  			destPort: 443,
  2026  			output:   "10.180.0.1:8443, 10.180.2.1:8443",
  2027  			masq:     false,
  2028  		},
  2029  		{
  2030  			name:     "clusterIP with TCP, UDP, and SCTP on same port (TCP)",
  2031  			sourceIP: "10.180.0.2",
  2032  			protocol: v1.ProtocolTCP,
  2033  			destIP:   "172.30.0.42",
  2034  			destPort: 53,
  2035  			output:   "10.180.0.1:5353, 10.180.2.1:5353",
  2036  			masq:     false,
  2037  		},
  2038  		{
  2039  			name:     "clusterIP with TCP, UDP, and SCTP on same port (UDP)",
  2040  			sourceIP: "10.180.0.2",
  2041  			protocol: v1.ProtocolUDP,
  2042  			destIP:   "172.30.0.42",
  2043  			destPort: 53,
  2044  			output:   "10.180.0.1:53, 10.180.2.1:53",
  2045  			masq:     false,
  2046  		},
  2047  		{
  2048  			name:     "clusterIP with TCP, UDP, and SCTP on same port (SCTP)",
  2049  			sourceIP: "10.180.0.2",
  2050  			protocol: v1.ProtocolSCTP,
  2051  			destIP:   "172.30.0.42",
  2052  			destPort: 53,
  2053  			output:   "10.180.0.1:53, 10.180.2.1:53",
  2054  			masq:     false,
  2055  		},
  2056  		{
  2057  			name:     "TCP-only port does not match UDP traffic",
  2058  			sourceIP: "10.180.0.2",
  2059  			protocol: v1.ProtocolUDP,
  2060  			destIP:   "172.30.0.42",
  2061  			destPort: 80,
  2062  			output:   "",
  2063  		},
  2064  		{
  2065  			name:     "svc1 does not accept svc2's ports",
  2066  			sourceIP: "10.180.0.2",
  2067  			destIP:   "172.30.0.41",
  2068  			destPort: 443,
  2069  			output:   "",
  2070  		},
  2071  	})
  2072  }
  2073  
  2074  func TestLoadBalancer(t *testing.T) {
  2075  	ipt := iptablestest.NewFake()
  2076  	fp := NewFakeProxier(ipt)
  2077  	svcIP := "172.30.0.41"
  2078  	svcPort := 80
  2079  	svcNodePort := 3001
  2080  	svcLBIP1 := "1.2.3.4"
  2081  	svcLBIP2 := "5.6.7.8"
  2082  	svcPortName := proxy.ServicePortName{
  2083  		NamespacedName: makeNSN("ns1", "svc1"),
  2084  		Port:           "p80",
  2085  		Protocol:       v1.ProtocolTCP,
  2086  	}
  2087  
  2088  	makeServiceMap(fp,
  2089  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2090  			svc.Spec.Type = "LoadBalancer"
  2091  			svc.Spec.ClusterIP = svcIP
  2092  			svc.Spec.Ports = []v1.ServicePort{{
  2093  				Name:     svcPortName.Port,
  2094  				Port:     int32(svcPort),
  2095  				Protocol: v1.ProtocolTCP,
  2096  				NodePort: int32(svcNodePort),
  2097  			}}
  2098  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
  2099  				{IP: svcLBIP1},
  2100  				{IP: svcLBIP2},
  2101  			}
  2102  			svc.Spec.LoadBalancerSourceRanges = []string{
  2103  				"192.168.0.0/24",
  2104  
  2105  				// Regression test that excess whitespace gets ignored
  2106  				" 203.0.113.0/25",
  2107  			}
  2108  		}),
  2109  	)
  2110  
  2111  	epIP := "10.180.0.1"
  2112  	populateEndpointSlices(fp,
  2113  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2114  			eps.AddressType = discovery.AddressTypeIPv4
  2115  			eps.Endpoints = []discovery.Endpoint{{
  2116  				Addresses: []string{epIP},
  2117  			}}
  2118  			eps.Ports = []discovery.EndpointPort{{
  2119  				Name:     ptr.To(svcPortName.Port),
  2120  				Port:     ptr.To(int32(svcPort)),
  2121  				Protocol: ptr.To(v1.ProtocolTCP),
  2122  			}}
  2123  		}),
  2124  	)
  2125  
  2126  	fp.syncProxyRules()
  2127  
  2128  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2129  		{
  2130  			name:     "pod to cluster IP",
  2131  			sourceIP: "10.0.0.2",
  2132  			destIP:   svcIP,
  2133  			destPort: svcPort,
  2134  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2135  			masq:     false,
  2136  		},
  2137  		{
  2138  			name:     "external to nodePort",
  2139  			sourceIP: testExternalClient,
  2140  			destIP:   testNodeIP,
  2141  			destPort: svcNodePort,
  2142  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2143  			masq:     true,
  2144  		},
  2145  		{
  2146  			name:     "nodePort bypasses LoadBalancerSourceRanges",
  2147  			sourceIP: testExternalClientBlocked,
  2148  			destIP:   testNodeIP,
  2149  			destPort: svcNodePort,
  2150  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2151  			masq:     true,
  2152  		},
  2153  		{
  2154  			name:     "accepted external to LB1",
  2155  			sourceIP: testExternalClient,
  2156  			destIP:   svcLBIP1,
  2157  			destPort: svcPort,
  2158  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2159  			masq:     true,
  2160  		},
  2161  		{
  2162  			name:     "accepted external to LB2",
  2163  			sourceIP: testExternalClient,
  2164  			destIP:   svcLBIP2,
  2165  			destPort: svcPort,
  2166  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2167  			masq:     true,
  2168  		},
  2169  		{
  2170  			name:     "blocked external to LB1",
  2171  			sourceIP: testExternalClientBlocked,
  2172  			destIP:   svcLBIP1,
  2173  			destPort: svcPort,
  2174  			output:   "DROP",
  2175  		},
  2176  		{
  2177  			name:     "blocked external to LB2",
  2178  			sourceIP: testExternalClientBlocked,
  2179  			destIP:   svcLBIP2,
  2180  			destPort: svcPort,
  2181  			output:   "DROP",
  2182  		},
  2183  		{
  2184  			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
  2185  			sourceIP: "10.0.0.2",
  2186  			destIP:   svcLBIP1,
  2187  			destPort: svcPort,
  2188  			output:   "DROP",
  2189  		},
  2190  		{
  2191  			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
  2192  			sourceIP: "10.0.0.2",
  2193  			destIP:   svcLBIP2,
  2194  			destPort: svcPort,
  2195  			output:   "DROP",
  2196  		},
  2197  		{
  2198  			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
  2199  			sourceIP: testNodeIP,
  2200  			destIP:   svcLBIP1,
  2201  			destPort: svcPort,
  2202  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2203  			masq:     true,
  2204  		},
  2205  		{
  2206  			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
  2207  			sourceIP: testNodeIP,
  2208  			destIP:   svcLBIP2,
  2209  			destPort: svcPort,
  2210  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2211  			masq:     true,
  2212  		},
  2213  
  2214  		// The LB rules assume that when you connect from a node to a LB IP,
  2215  		// something external to kube-proxy will cause the connection to be
  2216  		// SNATted to the LB IP, so if the LoadBalancerSourceRanges include the
  2217  		// node IP, then we also add a rule allowing traffic from the LB IP...
  2218  		{
  2219  			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
  2220  			sourceIP: svcLBIP1,
  2221  			destIP:   svcLBIP1,
  2222  			destPort: svcPort,
  2223  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2224  			masq:     true,
  2225  		},
  2226  		{
  2227  			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
  2228  			sourceIP: svcLBIP2,
  2229  			destIP:   svcLBIP2,
  2230  			destPort: svcPort,
  2231  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2232  			masq:     true,
  2233  		},
  2234  	})
  2235  }
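
// The stray leading space in " 203.0.113.0/25" above must be tolerated by the
// proxier. A minimal sketch of the kind of normalization involved (a
// hypothetical helper shown only for illustration; the real handling lives in
// the service-change tracking code):
func normalizeSourceRanges(ranges []string) []string {
	out := make([]string, 0, len(ranges))
	for _, r := range ranges {
		// Trim surrounding whitespace so " 203.0.113.0/25" parses as a CIDR.
		out = append(out, strings.TrimSpace(r))
	}
	return out
}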
  2236  
  2237  // TestNodePorts tests NodePort services under various combinations of the
  2238  // --nodeport-addresses and --localhost-nodeports flags.
  2239  func TestNodePorts(t *testing.T) {
  2240  	testCases := []struct {
  2241  		name string
  2242  
  2243  		family             v1.IPFamily
  2244  		localhostNodePorts bool
  2245  		nodePortAddresses  []string
  2246  
  2247  		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
  2248  		// node IP to be accepted
  2249  		allowAltNodeIP bool
  2250  
  2251  		// expectFirewall is true if we expect KUBE-FIREWALL to be filled in with
  2252  		// an anti-martian-packet rule
  2253  		expectFirewall bool
  2254  	}{
  2255  		{
  2256  			name: "ipv4, localhost-nodeports enabled",
  2257  
  2258  			family:             v1.IPv4Protocol,
  2259  			localhostNodePorts: true,
  2260  			nodePortAddresses:  nil,
  2261  
  2262  			allowAltNodeIP: true,
  2263  			expectFirewall: true,
  2264  		},
  2265  		{
  2266  			name: "ipv4, localhost-nodeports disabled",
  2267  
  2268  			family:             v1.IPv4Protocol,
  2269  			localhostNodePorts: false,
  2270  			nodePortAddresses:  nil,
  2271  
  2272  			allowAltNodeIP: true,
  2273  			expectFirewall: false,
  2274  		},
  2275  		{
  2276  			name: "ipv4, localhost-nodeports disabled, localhost in nodeport-addresses",
  2277  
  2278  			family:             v1.IPv4Protocol,
  2279  			localhostNodePorts: false,
  2280  			nodePortAddresses:  []string{"192.168.0.0/24", "127.0.0.1/32"},
  2281  
  2282  			allowAltNodeIP: false,
  2283  			expectFirewall: false,
  2284  		},
  2285  		{
  2286  			name: "ipv4, localhost-nodeports disabled, multiple nodeport-addresses",
  2287  
  2288  			family:             v1.IPv4Protocol,
  2289  			localhostNodePorts: false,
  2290  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2291  
  2292  			allowAltNodeIP: true,
  2293  			expectFirewall: false,
  2294  		},
  2295  		{
  2296  			name: "ipv6, localhost-nodeports enabled",
  2297  
  2298  			family:             v1.IPv6Protocol,
  2299  			localhostNodePorts: true,
  2300  			nodePortAddresses:  nil,
  2301  
  2302  			allowAltNodeIP: true,
  2303  			expectFirewall: false,
  2304  		},
  2305  		{
  2306  			name: "ipv6, localhost-nodeports disabled",
  2307  
  2308  			family:             v1.IPv6Protocol,
  2309  			localhostNodePorts: false,
  2310  			nodePortAddresses:  nil,
  2311  
  2312  			allowAltNodeIP: true,
  2313  			expectFirewall: false,
  2314  		},
  2315  		{
  2316  			name: "ipv6, localhost-nodeports disabled, multiple nodeport-addresses",
  2317  
  2318  			family:             v1.IPv6Protocol,
  2319  			localhostNodePorts: false,
  2320  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2321  
  2322  			allowAltNodeIP: false,
  2323  			expectFirewall: false,
  2324  		},
  2325  	}
  2326  
  2327  	for _, tc := range testCases {
  2328  		t.Run(tc.name, func(t *testing.T) {
  2329  			var ipt *iptablestest.FakeIPTables
  2330  			var svcIP, epIP1, epIP2 string
  2331  			if tc.family == v1.IPv4Protocol {
  2332  				ipt = iptablestest.NewFake()
  2333  				svcIP = "172.30.0.41"
  2334  				epIP1 = "10.180.0.1"
  2335  				epIP2 = "10.180.2.1"
  2336  			} else {
  2337  				ipt = iptablestest.NewIPv6Fake()
  2338  				svcIP = "fd00:172:30::41"
  2339  				epIP1 = "fd00:10:180::1"
  2340  				epIP2 = "fd00:10:180::2:1"
  2341  			}
  2342  			fp := NewFakeProxier(ipt)
  2343  			fp.localhostNodePorts = tc.localhostNodePorts
  2344  			if tc.nodePortAddresses != nil {
  2345  				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, nil)
  2346  			}
  2347  
  2348  			makeServiceMap(fp,
  2349  				makeTestService("ns1", "svc1", func(svc *v1.Service) {
  2350  					svc.Spec.Type = v1.ServiceTypeNodePort
  2351  					svc.Spec.ClusterIP = svcIP
  2352  					svc.Spec.Ports = []v1.ServicePort{{
  2353  						Name:     "p80",
  2354  						Port:     80,
  2355  						Protocol: v1.ProtocolTCP,
  2356  						NodePort: 3001,
  2357  					}}
  2358  				}),
  2359  			)
  2360  
  2361  			populateEndpointSlices(fp,
  2362  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  2363  					if tc.family == v1.IPv4Protocol {
  2364  						eps.AddressType = discovery.AddressTypeIPv4
  2365  					} else {
  2366  						eps.AddressType = discovery.AddressTypeIPv6
  2367  					}
  2368  					eps.Endpoints = []discovery.Endpoint{{
  2369  						Addresses: []string{epIP1},
  2370  						NodeName:  nil,
  2371  					}, {
  2372  						Addresses: []string{epIP2},
  2373  						NodeName:  ptr.To(testHostname),
  2374  					}}
  2375  					eps.Ports = []discovery.EndpointPort{{
  2376  						Name:     ptr.To("p80"),
  2377  						Port:     ptr.To[int32](80),
  2378  						Protocol: ptr.To(v1.ProtocolTCP),
  2379  					}}
  2380  				}),
  2381  			)
  2382  
  2383  			fp.syncProxyRules()
  2384  
  2385  			var podIP, externalClientIP, nodeIP, altNodeIP, localhostIP string
  2386  			if tc.family == v1.IPv4Protocol {
  2387  				podIP = "10.0.0.2"
  2388  				externalClientIP = testExternalClient
  2389  				nodeIP = testNodeIP
  2390  				altNodeIP = testNodeIPAlt
  2391  				localhostIP = "127.0.0.1"
  2392  			} else {
  2393  				podIP = "fd00:10::2"
  2394  				externalClientIP = "2600:5200::1"
  2395  				nodeIP = testNodeIPv6
  2396  				altNodeIP = testNodeIPv6Alt
  2397  				localhostIP = "::1"
  2398  			}
  2399  			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
  2400  
  2401  			// Basic tests are the same for all cases
  2402  			runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2403  				{
  2404  					name:     "pod to cluster IP",
  2405  					sourceIP: podIP,
  2406  					destIP:   svcIP,
  2407  					destPort: 80,
  2408  					output:   output,
  2409  					masq:     false,
  2410  				},
  2411  				{
  2412  					name:     "external to nodePort",
  2413  					sourceIP: externalClientIP,
  2414  					destIP:   nodeIP,
  2415  					destPort: 3001,
  2416  					output:   output,
  2417  					masq:     true,
  2418  				},
  2419  				{
  2420  					name:     "node to nodePort",
  2421  					sourceIP: nodeIP,
  2422  					destIP:   nodeIP,
  2423  					destPort: 3001,
  2424  					output:   output,
  2425  					masq:     true,
  2426  				},
  2427  			})
  2428  
  2429  			// localhost-to-NodePort traffic is only allowed for IPv4, and only if localhost NodePorts are enabled
  2430  			if tc.family == v1.IPv4Protocol && tc.localhostNodePorts {
  2431  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2432  					{
  2433  						name:     "localhost to nodePort gets masqueraded",
  2434  						sourceIP: localhostIP,
  2435  						destIP:   localhostIP,
  2436  						destPort: 3001,
  2437  						output:   output,
  2438  						masq:     true,
  2439  					},
  2440  				})
  2441  			} else {
  2442  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2443  					{
  2444  						name:     "localhost to nodePort is ignored",
  2445  						sourceIP: localhostIP,
  2446  						destIP:   localhostIP,
  2447  						destPort: 3001,
  2448  						output:   "",
  2449  					},
  2450  				})
  2451  			}
  2452  
  2453  			// NodePort on altNodeIP should be allowed, unless
  2454  			// nodePortAddresses excludes altNodeIP
  2455  			if tc.allowAltNodeIP {
  2456  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2457  					{
  2458  						name:     "external to nodePort on secondary IP",
  2459  						sourceIP: externalClientIP,
  2460  						destIP:   altNodeIP,
  2461  						destPort: 3001,
  2462  						output:   output,
  2463  						masq:     true,
  2464  					},
  2465  				})
  2466  			} else {
  2467  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2468  					{
  2469  						name:     "secondary nodeIP ignores NodePorts",
  2470  						sourceIP: externalClientIP,
  2471  						destIP:   altNodeIP,
  2472  						destPort: 3001,
  2473  						output:   "",
  2474  					},
  2475  				})
  2476  			}
  2477  
  2478  			// We have to check the firewall rule manually rather than via
  2479  			// runPacketFlowTests(), because the packet tracer doesn't
  2480  			// implement conntrack states.
  2481  			var expected string
  2482  			if tc.expectFirewall {
  2483  				expected = "-A KUBE-FIREWALL -m comment --comment \"block incoming localnet connections\" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP\n"
  2484  			}
  2485  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeletFirewallChain, expected, fp.iptablesData.String())
  2486  		})
  2487  	}
  2488  }
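
// expectKubeletFirewallRule condenses the expectFirewall column of the table
// above into a predicate (illustration only, not proxier code): the
// anti-martian-packet rule is needed only for IPv4 with localhost NodePorts
// enabled, since only then can 127.0.0.0/8 traffic reach a NodePort.
func expectKubeletFirewallRule(family v1.IPFamily, localhostNodePorts bool) bool {
	return family == v1.IPv4Protocol && localhostNodePorts
}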
  2489  
  2490  func TestHealthCheckNodePort(t *testing.T) {
  2491  	ipt := iptablestest.NewFake()
  2492  	fp := NewFakeProxier(ipt)
  2493  	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)
  2494  
  2495  	svcIP := "172.30.0.42"
  2496  	svcPort := 80
  2497  	svcNodePort := 3001
  2498  	svcHealthCheckNodePort := 30000
  2499  	svcPortName := proxy.ServicePortName{
  2500  		NamespacedName: makeNSN("ns1", "svc1"),
  2501  		Port:           "p80",
  2502  		Protocol:       v1.ProtocolTCP,
  2503  	}
  2504  
  2505  	svc := makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2506  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2507  		svc.Spec.ClusterIP = svcIP
  2508  		svc.Spec.Ports = []v1.ServicePort{{
  2509  			Name:     svcPortName.Port,
  2510  			Port:     int32(svcPort),
  2511  			Protocol: v1.ProtocolTCP,
  2512  			NodePort: int32(svcNodePort),
  2513  		}}
  2514  		svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2515  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2516  	})
  2517  	makeServiceMap(fp, svc)
  2518  	fp.syncProxyRules()
  2519  
  2520  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2521  		{
  2522  			name:     "firewall accepts HealthCheckNodePort",
  2523  			sourceIP: "1.2.3.4",
  2524  			destIP:   testNodeIP,
  2525  			destPort: svcHealthCheckNodePort,
  2526  			output:   "ACCEPT",
  2527  			masq:     false,
  2528  		},
  2529  	})
  2530  
  2531  	fp.OnServiceDelete(svc)
  2532  	fp.syncProxyRules()
  2533  
  2534  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2535  		{
  2536  			name:     "HealthCheckNodePort no longer has any rule",
  2537  			sourceIP: "1.2.3.4",
  2538  			destIP:   testNodeIP,
  2539  			destPort: svcHealthCheckNodePort,
  2540  			output:   "",
  2541  		},
  2542  	})
  2543  }
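
// For reference, the ACCEPT verified above comes from a filter-table rule
// roughly of the following form (the exact comment text is an assumption and
// may differ between versions):
//
//	-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT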
  2544  
  2545  func TestDropInvalidRule(t *testing.T) {
  2546  	for _, tcpLiberal := range []bool{false, true} {
  2547  		t.Run(fmt.Sprintf("tcpLiberal %t", tcpLiberal), func(t *testing.T) {
  2548  			ipt := iptablestest.NewFake()
  2549  			fp := NewFakeProxier(ipt)
  2550  			fp.conntrackTCPLiberal = tcpLiberal
  2551  			fp.syncProxyRules()
  2552  
  2553  			var expected string
  2554  			if !tcpLiberal {
  2555  				expected = "-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP"
  2556  			}
  2557  			expected += dedent.Dedent(`
  2558  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  2559  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  2560  				`)
  2561  
  2562  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeForwardChain, expected, fp.iptablesData.String())
  2563  		})
  2564  	}
  2565  }
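
// writeInvalidDropRule sketches the conditional this test exercises (a
// hypothetical helper; the real logic is inlined in syncProxyRules): when
// conntrack is in liberal TCP mode, out-of-window packets are not marked
// INVALID, so the DROP rule is unnecessary and omitted.
func writeInvalidDropRule(buf *bytes.Buffer, tcpLiberal bool) {
	if !tcpLiberal {
		buf.WriteString("-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP\n")
	}
}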
  2566  
  2567  func TestMasqueradeRule(t *testing.T) {
  2568  	for _, randomFully := range []bool{false, true} {
  2569  		t.Run(fmt.Sprintf("randomFully %t", randomFully), func(t *testing.T) {
  2570  			ipt := iptablestest.NewFake().SetHasRandomFully(randomFully)
  2571  			fp := NewFakeProxier(ipt)
  2572  			fp.syncProxyRules()
  2573  
  2574  			expectedFmt := dedent.Dedent(`
  2575  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  2576  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  2577  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s
  2578  				`)
  2579  			var expected string
  2580  			if randomFully {
  2581  				expected = fmt.Sprintf(expectedFmt, " --random-fully")
  2582  			} else {
  2583  				expected = fmt.Sprintf(expectedFmt, "")
  2584  			}
  2585  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT, kubePostroutingChain, expected, fp.iptablesData.String())
  2586  		})
  2587  	}
  2588  }
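
// masqueradeArgs sketches the flag handling covered above (hypothetical
// helper): --random-fully is appended to the MASQUERADE rule only when the
// underlying iptables reports support for it.
func masqueradeArgs(hasRandomFully bool) string {
	args := "-j MASQUERADE"
	if hasRandomFully {
		args += " --random-fully"
	}
	return args
}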
  2589  
  2590  // TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs does not
  2591  // get masqueraded when using Local traffic policy. Traffic from external sources can
  2592  // then only be routed to local endpoints, while traffic from internal sources is
  2593  // still routed to all endpoints.
  2594  func TestExternalTrafficPolicyLocal(t *testing.T) {
  2595  	ipt := iptablestest.NewFake()
  2596  	fp := NewFakeProxier(ipt)
  2597  
  2598  	svcIP := "172.30.0.41"
  2599  	svcPort := 80
  2600  	svcNodePort := 3001
  2601  	svcHealthCheckNodePort := 30000
  2602  	svcExternalIPs := "192.168.99.11"
  2603  	svcLBIP := "1.2.3.4"
  2604  	svcPortName := proxy.ServicePortName{
  2605  		NamespacedName: makeNSN("ns1", "svc1"),
  2606  		Port:           "p80",
  2607  	}
  2608  
  2609  	makeServiceMap(fp,
  2610  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2611  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2612  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2613  			svc.Spec.ClusterIP = svcIP
  2614  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2615  			svc.Spec.Ports = []v1.ServicePort{{
  2616  				Name:       svcPortName.Port,
  2617  				Port:       int32(svcPort),
  2618  				Protocol:   v1.ProtocolTCP,
  2619  				NodePort:   int32(svcNodePort),
  2620  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2621  			}}
  2622  			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2623  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2624  				IP: svcLBIP,
  2625  			}}
  2626  		}),
  2627  	)
  2628  
  2629  	epIP1 := "10.180.0.1"
  2630  	epIP2 := "10.180.2.1"
  2631  	populateEndpointSlices(fp,
  2632  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2633  			eps.AddressType = discovery.AddressTypeIPv4
  2634  			eps.Endpoints = []discovery.Endpoint{{
  2635  				Addresses: []string{epIP1},
  2636  			}, {
  2637  				Addresses: []string{epIP2},
  2638  				NodeName:  ptr.To(testHostname),
  2639  			}}
  2640  			eps.Ports = []discovery.EndpointPort{{
  2641  				Name:     ptr.To(svcPortName.Port),
  2642  				Port:     ptr.To(int32(svcPort)),
  2643  				Protocol: ptr.To(v1.ProtocolTCP),
  2644  			}}
  2645  		}),
  2646  	)
  2647  
  2648  	fp.syncProxyRules()
  2649  
  2650  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2651  		{
  2652  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2653  			sourceIP: "10.0.0.2",
  2654  			destIP:   svcIP,
  2655  			destPort: svcPort,
  2656  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2657  			masq:     false,
  2658  		},
  2659  		{
  2660  			name:     "pod to external IP hits both endpoints, unmasqueraded",
  2661  			sourceIP: "10.0.0.2",
  2662  			destIP:   svcExternalIPs,
  2663  			destPort: svcPort,
  2664  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2665  			masq:     false,
  2666  		},
  2667  		{
  2668  			name:     "external to external IP hits only local endpoint, unmasqueraded",
  2669  			sourceIP: testExternalClient,
  2670  			destIP:   svcExternalIPs,
  2671  			destPort: svcPort,
  2672  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2673  			masq:     false,
  2674  		},
  2675  		{
  2676  			name:     "pod to LB IP hits both endpoints, unmasqueraded",
  2677  			sourceIP: "10.0.0.2",
  2678  			destIP:   svcLBIP,
  2679  			destPort: svcPort,
  2680  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2681  			masq:     false,
  2682  		},
  2683  		{
  2684  			name:     "external to LB IP hits only local endpoint, unmasqueraded",
  2685  			sourceIP: testExternalClient,
  2686  			destIP:   svcLBIP,
  2687  			destPort: svcPort,
  2688  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2689  			masq:     false,
  2690  		},
  2691  		{
  2692  			name:     "pod to NodePort hits both endpoints, unmasqueraded",
  2693  			sourceIP: "10.0.0.2",
  2694  			destIP:   testNodeIP,
  2695  			destPort: svcNodePort,
  2696  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2697  			masq:     false,
  2698  		},
  2699  		{
  2700  			name:     "external to NodePort hits only local endpoint, unmasqueraded",
  2701  			sourceIP: testExternalClient,
  2702  			destIP:   testNodeIP,
  2703  			destPort: svcNodePort,
  2704  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2705  			masq:     false,
  2706  		},
  2707  	})
  2708  }
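
// filterLocalEndpoints illustrates the endpoint selection verified above
// (hypothetical helper, not the proxier's actual code): with
// ExternalTrafficPolicy=Local, externally-sourced traffic may only be
// delivered to endpoints on this node.
func filterLocalEndpoints(eps []proxy.Endpoint) []proxy.Endpoint {
	var local []proxy.Endpoint
	for _, ep := range eps {
		if ep.IsLocal() {
			local = append(local, ep)
		}
	}
	return local
}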
  2709  
  2710  // TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets
  2711  // masqueraded when using Cluster traffic policy.
  2712  func TestExternalTrafficPolicyCluster(t *testing.T) {
  2713  	ipt := iptablestest.NewFake()
  2714  	fp := NewFakeProxier(ipt)
  2715  
  2716  	svcIP := "172.30.0.41"
  2717  	svcPort := 80
  2718  	svcNodePort := 3001
  2719  	svcExternalIPs := "192.168.99.11"
  2720  	svcLBIP := "1.2.3.4"
  2721  	svcPortName := proxy.ServicePortName{
  2722  		NamespacedName: makeNSN("ns1", "svc1"),
  2723  		Port:           "p80",
  2724  	}
  2725  
  2726  	makeServiceMap(fp,
  2727  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2728  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2729  			svc.Spec.ClusterIP = svcIP
  2730  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2731  			svc.Spec.Ports = []v1.ServicePort{{
  2732  				Name:       svcPortName.Port,
  2733  				Port:       int32(svcPort),
  2734  				Protocol:   v1.ProtocolTCP,
  2735  				NodePort:   int32(svcNodePort),
  2736  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2737  			}}
  2738  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2739  				IP: svcLBIP,
  2740  			}}
  2741  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  2742  		}),
  2743  	)
  2744  
  2745  	epIP1 := "10.180.0.1"
  2746  	epIP2 := "10.180.2.1"
  2747  	populateEndpointSlices(fp,
  2748  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2749  			eps.AddressType = discovery.AddressTypeIPv4
  2750  			eps.Endpoints = []discovery.Endpoint{{
  2751  				Addresses: []string{epIP1},
  2752  				NodeName:  nil,
  2753  			}, {
  2754  				Addresses: []string{epIP2},
  2755  				NodeName:  ptr.To(testHostname),
  2756  			}}
  2757  			eps.Ports = []discovery.EndpointPort{{
  2758  				Name:     ptr.To(svcPortName.Port),
  2759  				Port:     ptr.To(int32(svcPort)),
  2760  				Protocol: ptr.To(v1.ProtocolTCP),
  2761  			}}
  2762  		}),
  2763  	)
  2764  
  2765  	fp.syncProxyRules()
  2766  
  2767  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2768  		{
  2769  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2770  			sourceIP: "10.0.0.2",
  2771  			destIP:   svcIP,
  2772  			destPort: svcPort,
  2773  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2774  			masq:     false,
  2775  		},
  2776  		{
  2777  			name:     "pod to external IP hits both endpoints, masqueraded",
  2778  			sourceIP: "10.0.0.2",
  2779  			destIP:   svcExternalIPs,
  2780  			destPort: svcPort,
  2781  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2782  			masq:     true,
  2783  		},
  2784  		{
  2785  			name:     "external to external IP hits both endpoints, masqueraded",
  2786  			sourceIP: testExternalClient,
  2787  			destIP:   svcExternalIPs,
  2788  			destPort: svcPort,
  2789  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2790  			masq:     true,
  2791  		},
  2792  		{
  2793  			name:     "pod to LB IP hits both endpoints, masqueraded",
  2794  			sourceIP: "10.0.0.2",
  2795  			destIP:   svcLBIP,
  2796  			destPort: svcPort,
  2797  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2798  			masq:     true,
  2799  		},
  2800  		{
  2801  			name:     "external to LB IP hits both endpoints, masqueraded",
  2802  			sourceIP: testExternalClient,
  2803  			destIP:   svcLBIP,
  2804  			destPort: svcPort,
  2805  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2806  			masq:     true,
  2807  		},
  2808  		{
  2809  			name:     "pod to NodePort hits both endpoints, masqueraded",
  2810  			sourceIP: "10.0.0.2",
  2811  			destIP:   testNodeIP,
  2812  			destPort: svcNodePort,
  2813  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2814  			masq:     true,
  2815  		},
  2816  		{
  2817  			name:     "external to NodePort hits both endpoints, masqueraded",
  2818  			sourceIP: testExternalClient,
  2819  			destIP:   testNodeIP,
  2820  			destPort: svcNodePort,
  2821  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2822  			masq:     true,
  2823  		},
  2824  	})
  2825  }
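
// wantsMasquerade condenses the masq expectations of the two preceding tests
// into a predicate (illustration only): traffic to an externally-facing IP is
// masqueraded under Cluster policy but left alone under Local policy, so that
// local backends see the original client IP.
func wantsMasquerade(etpLocal, externalDest bool) bool {
	return externalDest && !etpLocal
}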
  2826  
  2827  func TestComputeProbability(t *testing.T) {
  2828  	expectedProbabilities := map[int]string{
  2829  		1:      "1.0000000000",
  2830  		2:      "0.5000000000",
  2831  		10:     "0.1000000000",
  2832  		100:    "0.0100000000",
  2833  		1000:   "0.0010000000",
  2834  		10000:  "0.0001000000",
  2835  		100000: "0.0000100000",
  2836  		100001: "0.0000099999",
  2837  	}
  2838  
  2839  	for num, expected := range expectedProbabilities {
  2840  		actual := computeProbability(num)
  2841  		if actual != expected {
  2842  			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
  2843  		}
  2844  	}
  2845  
  2846  	prevProbability := float64(0)
  2847  	for i := 100000; i > 1; i-- {
  2848  		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
  2849  		if err != nil {
  2850  			t.Fatalf("Error parsing float probability for %d: %v", i, err)
  2851  		}
  2852  		if currProbability <= prevProbability {
  2853  			t.Fatalf("Probability unexpectedly <= previous probability for %d: (%0.10f <= %0.10f)", i, currProbability, prevProbability)
  2854  		}
  2855  		prevProbability = currProbability
  2856  	}
  2857  }
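
// computeProbability itself lives in proxier.go. A minimal implementation
// consistent with the expected values above (an assumption, shown here only
// for context) would be:
//
//	func computeProbability(n int) string {
//		return fmt.Sprintf("%0.10f", 1.0/float64(n))
//	}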
  2858  
  2859  func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
  2860  	svc := &v1.Service{
  2861  		ObjectMeta: metav1.ObjectMeta{
  2862  			Name:        name,
  2863  			Namespace:   namespace,
  2864  			Annotations: map[string]string{},
  2865  		},
  2866  		Spec:   v1.ServiceSpec{},
  2867  		Status: v1.ServiceStatus{},
  2868  	}
  2869  	svcFunc(svc)
  2870  	return svc
  2871  }
  2872  
  2873  func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
  2874  	svcPort := v1.ServicePort{
  2875  		Name:       name,
  2876  		Protocol:   protocol,
  2877  		Port:       port,
  2878  		NodePort:   nodeport,
  2879  		TargetPort: intstr.FromInt32(int32(targetPort)),
  2880  	}
  2881  	return append(array, svcPort)
  2882  }
  2883  
  2884  func TestBuildServiceMapAddRemove(t *testing.T) {
  2885  	ipt := iptablestest.NewFake()
  2886  	fp := NewFakeProxier(ipt)
  2887  
  2888  	services := []*v1.Service{
  2889  		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  2890  			svc.Spec.Type = v1.ServiceTypeClusterIP
  2891  			svc.Spec.ClusterIP = "172.30.55.4"
  2892  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  2893  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  2894  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
  2895  		}),
  2896  		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
  2897  			svc.Spec.Type = v1.ServiceTypeNodePort
  2898  			svc.Spec.ClusterIP = "172.30.55.10"
  2899  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
  2900  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
  2901  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
  2902  		}),
  2903  		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
  2904  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2905  			svc.Spec.ClusterIP = "172.30.55.11"
  2906  			svc.Spec.LoadBalancerIP = "1.2.3.4"
  2907  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
  2908  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
  2909  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  2910  				Ingress: []v1.LoadBalancerIngress{
  2911  					{IP: "1.2.3.4"},
  2912  				},
  2913  			}
  2914  		}),
  2915  		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
  2916  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2917  			svc.Spec.ClusterIP = "172.30.55.12"
  2918  			svc.Spec.LoadBalancerIP = "5.6.7.8"
  2919  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
  2920  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
  2921  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  2922  				Ingress: []v1.LoadBalancerIngress{
  2923  					{IP: "5.6.7.8"},
  2924  				},
  2925  			}
  2926  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2927  			svc.Spec.HealthCheckNodePort = 345
  2928  		}),
  2929  	}
  2930  
  2931  	for i := range services {
  2932  		fp.OnServiceAdd(services[i])
  2933  	}
  2934  	result := fp.svcPortMap.Update(fp.serviceChanges)
  2935  	if len(fp.svcPortMap) != 10 {
  2936  		t.Errorf("expected service map length 10, got %v", fp.svcPortMap)
  2937  	}
  2938  
  2939  	if len(result.DeletedUDPClusterIPs) != 0 {
  2940  		// Services only added, so nothing stale yet
  2941  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  2942  	}
  2943  
  2944  	// Only the only-local-load-balancer service gets a healthcheck port
  2945  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  2946  	if len(healthCheckNodePorts) != 1 {
  2947  		t.Errorf("expected 1 healthcheck port, got %v", healthCheckNodePorts)
  2948  	} else {
  2949  		nsn := makeNSN("somewhere", "only-local-load-balancer")
  2950  		if port, found := healthCheckNodePorts[nsn]; !found || port != 345 {
  2951  			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, healthCheckNodePorts)
  2952  		}
  2953  	}
  2954  
  2955  	// Remove some stuff
  2956  	// oneService is a modification of services[0] with the first port removed.
  2957  	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  2958  		svc.Spec.Type = v1.ServiceTypeClusterIP
  2959  		svc.Spec.ClusterIP = "172.30.55.4"
  2960  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  2961  	})
  2962  
  2963  	fp.OnServiceUpdate(services[0], oneService)
  2964  	fp.OnServiceDelete(services[1])
  2965  	fp.OnServiceDelete(services[2])
  2966  	fp.OnServiceDelete(services[3])
  2967  
  2968  	result = fp.svcPortMap.Update(fp.serviceChanges)
  2969  	if len(fp.svcPortMap) != 1 {
  2970  		t.Errorf("expected service map length 1, got %v", fp.svcPortMap)
  2971  	}
  2972  
  2973  	// All services but one were deleted. While you'd expect only the ClusterIPs
  2974  	// from the three deleted services here, we still have the ClusterIP for
  2975  	// the not-deleted service, because one of its ServicePorts was deleted.
  2976  	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
  2977  	if len(result.DeletedUDPClusterIPs) != len(expectedStaleUDPServices) {
  2978  		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.DeletedUDPClusterIPs.UnsortedList())
  2979  	}
  2980  	for _, ip := range expectedStaleUDPServices {
  2981  		if !result.DeletedUDPClusterIPs.Has(ip) {
  2982  			t.Errorf("expected stale UDP service %s", ip)
  2983  		}
  2984  	}
  2985  
  2986  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  2987  	if len(healthCheckNodePorts) != 0 {
  2988  		t.Errorf("expected 0 healthcheck ports, got %v", healthCheckNodePorts)
  2989  	}
  2990  }
  2991  
  2992  func TestBuildServiceMapServiceHeadless(t *testing.T) {
  2993  	ipt := iptablestest.NewFake()
  2994  	fp := NewFakeProxier(ipt)
  2995  
  2996  	makeServiceMap(fp,
  2997  		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
  2998  			svc.Spec.Type = v1.ServiceTypeClusterIP
  2999  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3000  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
  3001  		}),
  3002  		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
  3003  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3004  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3005  		}),
  3006  	)
  3007  
  3008  	// Headless services should be ignored
  3009  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3010  	if len(fp.svcPortMap) != 0 {
  3011  		t.Errorf("expected service map length 0, got %d", len(fp.svcPortMap))
  3012  	}
  3013  
  3014  	if len(result.DeletedUDPClusterIPs) != 0 {
  3015  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3016  	}
  3017  
  3018  	// No proxied services, so no healthchecks
  3019  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3020  	if len(healthCheckNodePorts) != 0 {
  3021  		t.Errorf("expected healthcheck ports length 0, got %d", len(healthCheckNodePorts))
  3022  	}
  3023  }
  3024  
  3025  func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
  3026  	ipt := iptablestest.NewFake()
  3027  	fp := NewFakeProxier(ipt)
  3028  
  3029  	makeServiceMap(fp,
  3030  		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
  3031  			svc.Spec.Type = v1.ServiceTypeExternalName
  3032  			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
  3033  			svc.Spec.ExternalName = "foo2.bar.com"
  3034  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
  3035  		}),
  3036  	)
  3037  
  3038  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3039  	if len(fp.svcPortMap) != 0 {
  3040  		t.Errorf("expected service map length 0, got %v", fp.svcPortMap)
  3041  	}
  3042  	if len(result.DeletedUDPClusterIPs) != 0 {
  3043  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs)
  3044  	}
  3045  	// No proxied services, so no healthchecks
  3046  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3047  	if len(healthCheckNodePorts) != 0 {
  3048  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3049  	}
  3050  }
  3051  
  3052  func TestBuildServiceMapServiceUpdate(t *testing.T) {
  3053  	ipt := iptablestest.NewFake()
  3054  	fp := NewFakeProxier(ipt)
  3055  
  3056  	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3057  		svc.Spec.Type = v1.ServiceTypeClusterIP
  3058  		svc.Spec.ClusterIP = "172.30.55.4"
  3059  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  3060  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
  3061  	})
  3062  	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3063  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  3064  		svc.Spec.ClusterIP = "172.30.55.4"
  3065  		svc.Spec.LoadBalancerIP = "1.2.3.4"
  3066  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
  3067  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
  3068  		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  3069  			Ingress: []v1.LoadBalancerIngress{
  3070  				{IP: "1.2.3.4"},
  3071  			},
  3072  		}
  3073  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3074  		svc.Spec.HealthCheckNodePort = 345
  3075  	})
  3076  
  3077  	fp.OnServiceAdd(servicev1)
  3078  
  3079  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3080  	if len(fp.svcPortMap) != 2 {
  3081  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3082  	}
  3083  	if len(result.DeletedUDPClusterIPs) != 0 {
  3084  		// Services only added, so nothing stale yet
  3085  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3086  	}
  3087  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3088  	if len(healthCheckNodePorts) != 0 {
  3089  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3090  	}
  3091  
  3092  	// Change service to load-balancer
  3093  	fp.OnServiceUpdate(servicev1, servicev2)
  3094  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3095  	if len(fp.svcPortMap) != 2 {
  3096  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3097  	}
  3098  	if len(result.DeletedUDPClusterIPs) != 0 {
  3099  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3100  	}
  3101  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3102  	if len(healthCheckNodePorts) != 1 {
  3103  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3104  	}
  3105  
  3106  	// No change; make sure the service map stays the same and there are
  3107  	// no health-check changes
  3108  	fp.OnServiceUpdate(servicev2, servicev2)
  3109  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3110  	if len(fp.svcPortMap) != 2 {
  3111  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3112  	}
  3113  	if len(result.DeletedUDPClusterIPs) != 0 {
  3114  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3115  	}
  3116  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3117  	if len(healthCheckNodePorts) != 1 {
  3118  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3119  	}
  3120  
  3121  	// And back to ClusterIP
  3122  	fp.OnServiceUpdate(servicev2, servicev1)
  3123  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3124  	if len(fp.svcPortMap) != 2 {
  3125  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3126  	}
  3127  	if len(result.DeletedUDPClusterIPs) != 0 {
  3128  		// The ClusterIP and its UDP port are unchanged, so nothing is stale
  3129  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3130  	}
  3131  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3132  	if len(healthCheckNodePorts) != 0 {
  3133  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3134  	}
  3135  }
  3136  
  3137  func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
  3138  	for i := range allEndpointSlices {
  3139  		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
  3140  	}
  3141  }
  3142  
  3143  func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
  3144  	eps := &discovery.EndpointSlice{
  3145  		ObjectMeta: metav1.ObjectMeta{
  3146  			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
  3147  			Namespace: namespace,
  3148  			Labels:    map[string]string{discovery.LabelServiceName: name},
  3149  		},
  3150  	}
  3151  	epsFunc(eps)
  3152  	return eps
  3153  }
  3154  
  3155  func makeNSN(namespace, name string) types.NamespacedName {
  3156  	return types.NamespacedName{Namespace: namespace, Name: name}
  3157  }
  3158  
  3159  func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
  3160  	return proxy.ServicePortName{
  3161  		NamespacedName: makeNSN(ns, name),
  3162  		Port:           port,
  3163  		Protocol:       protocol,
  3164  	}
  3165  }
  3166  
  3167  func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
  3168  	for i := range allServices {
  3169  		proxier.OnServiceAdd(allServices[i])
  3170  	}
  3171  
  3172  	proxier.mu.Lock()
  3173  	defer proxier.mu.Unlock()
  3174  	proxier.servicesSynced = true
  3175  }
  3176  
  3177  type endpointExpectation struct {
  3178  	endpoint string
  3179  	isLocal  bool
  3180  }
  3181  
  3182  func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) {
  3183  	if len(newMap) != len(expected) {
  3184  		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
  3185  	}
  3186  	for x := range expected {
  3187  		if len(newMap[x]) != len(expected[x]) {
  3188  			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
  3189  		} else {
  3190  			for i := range expected[x] {
  3191  				newEp := newMap[x][i]
  3192  				if newEp.String() != expected[x][i].endpoint ||
  3193  					newEp.IsLocal() != expected[x][i].isLocal {
  3194  					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
  3195  				}
  3196  			}
  3197  		}
  3198  	}
  3199  }
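
// Example invocation of checkEndpointExpectations (values taken from the
// "no change, named port, local" case below):
//
//	checkEndpointExpectations(t, 1, newMap, map[proxy.ServicePortName][]endpointExpectation{
//		makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
//			{endpoint: "10.1.1.1:11", isLocal: true},
//		},
//	})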
  3200  
  3201  func TestUpdateEndpointsMap(t *testing.T) {
  3202  	emptyEndpointSlices := []*discovery.EndpointSlice{
  3203  		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
  3204  	}
  3205  	subset1 := func(eps *discovery.EndpointSlice) {
  3206  		eps.AddressType = discovery.AddressTypeIPv4
  3207  		eps.Endpoints = []discovery.Endpoint{{
  3208  			Addresses: []string{"10.1.1.1"},
  3209  		}}
  3210  		eps.Ports = []discovery.EndpointPort{{
  3211  			Name:     ptr.To("p11"),
  3212  			Port:     ptr.To[int32](11),
  3213  			Protocol: ptr.To(v1.ProtocolUDP),
  3214  		}}
  3215  	}
  3216  	subset2 := func(eps *discovery.EndpointSlice) {
  3217  		eps.AddressType = discovery.AddressTypeIPv4
  3218  		eps.Endpoints = []discovery.Endpoint{{
  3219  			Addresses: []string{"10.1.1.2"},
  3220  		}}
  3221  		eps.Ports = []discovery.EndpointPort{{
  3222  			Name:     ptr.To("p12"),
  3223  			Port:     ptr.To[int32](12),
  3224  			Protocol: ptr.To(v1.ProtocolUDP),
  3225  		}}
  3226  	}
  3227  	namedPortLocal := []*discovery.EndpointSlice{
  3228  		makeTestEndpointSlice("ns1", "ep1", 1,
  3229  			func(eps *discovery.EndpointSlice) {
  3230  				eps.AddressType = discovery.AddressTypeIPv4
  3231  				eps.Endpoints = []discovery.Endpoint{{
  3232  					Addresses: []string{"10.1.1.1"},
  3233  					NodeName:  ptr.To(testHostname),
  3234  				}}
  3235  				eps.Ports = []discovery.EndpointPort{{
  3236  					Name:     ptr.To("p11"),
  3237  					Port:     ptr.To[int32](11),
  3238  					Protocol: ptr.To(v1.ProtocolUDP),
  3239  				}}
  3240  			}),
  3241  	}
  3242  	namedPort := []*discovery.EndpointSlice{
  3243  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3244  	}
  3245  	namedPortRenamed := []*discovery.EndpointSlice{
  3246  		makeTestEndpointSlice("ns1", "ep1", 1,
  3247  			func(eps *discovery.EndpointSlice) {
  3248  				eps.AddressType = discovery.AddressTypeIPv4
  3249  				eps.Endpoints = []discovery.Endpoint{{
  3250  					Addresses: []string{"10.1.1.1"},
  3251  				}}
  3252  				eps.Ports = []discovery.EndpointPort{{
  3253  					Name:     ptr.To("p11-2"),
  3254  					Port:     ptr.To[int32](11),
  3255  					Protocol: ptr.To(v1.ProtocolUDP),
  3256  				}}
  3257  			}),
  3258  	}
  3259  	namedPortRenumbered := []*discovery.EndpointSlice{
  3260  		makeTestEndpointSlice("ns1", "ep1", 1,
  3261  			func(eps *discovery.EndpointSlice) {
  3262  				eps.AddressType = discovery.AddressTypeIPv4
  3263  				eps.Endpoints = []discovery.Endpoint{{
  3264  					Addresses: []string{"10.1.1.1"},
  3265  				}}
  3266  				eps.Ports = []discovery.EndpointPort{{
  3267  					Name:     ptr.To("p11"),
  3268  					Port:     ptr.To[int32](22),
  3269  					Protocol: ptr.To(v1.ProtocolUDP),
  3270  				}}
  3271  			}),
  3272  	}
  3273  	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
  3274  		makeTestEndpointSlice("ns1", "ep1", 1,
  3275  			func(eps *discovery.EndpointSlice) {
  3276  				eps.AddressType = discovery.AddressTypeIPv4
  3277  				eps.Endpoints = []discovery.Endpoint{{
  3278  					Addresses: []string{"10.1.1.1"},
  3279  				}, {
  3280  					Addresses: []string{"10.1.1.2"},
  3281  					NodeName:  ptr.To(testHostname),
  3282  				}}
  3283  				eps.Ports = []discovery.EndpointPort{{
  3284  					Name:     ptr.To("p11"),
  3285  					Port:     ptr.To[int32](11),
  3286  					Protocol: ptr.To(v1.ProtocolUDP),
  3287  				}, {
  3288  					Name:     ptr.To("p12"),
  3289  					Port:     ptr.To[int32](12),
  3290  					Protocol: ptr.To(v1.ProtocolUDP),
  3291  				}}
  3292  			}),
  3293  	}
  3294  	multipleSubsets := []*discovery.EndpointSlice{
  3295  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3296  		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
  3297  	}
  3298  	subsetLocal := func(eps *discovery.EndpointSlice) {
  3299  		eps.AddressType = discovery.AddressTypeIPv4
  3300  		eps.Endpoints = []discovery.Endpoint{{
  3301  			Addresses: []string{"10.1.1.2"},
  3302  			NodeName:  ptr.To(testHostname),
  3303  		}}
  3304  		eps.Ports = []discovery.EndpointPort{{
  3305  			Name:     ptr.To("p12"),
  3306  			Port:     ptr.To[int32](12),
  3307  			Protocol: ptr.To(v1.ProtocolUDP),
  3308  		}}
  3309  	}
  3310  	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
  3311  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3312  		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
  3313  	}
  3314  	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
  3315  		eps.AddressType = discovery.AddressTypeIPv4
  3316  		eps.Endpoints = []discovery.Endpoint{{
  3317  			Addresses: []string{"10.1.1.1"},
  3318  			NodeName:  ptr.To(testHostname),
  3319  		}}
  3320  		eps.Ports = []discovery.EndpointPort{{
  3321  			Name:     ptr.To("p11"),
  3322  			Port:     ptr.To[int32](11),
  3323  			Protocol: ptr.To(v1.ProtocolUDP),
  3324  		}, {
  3325  			Name:     ptr.To("p12"),
  3326  			Port:     ptr.To[int32](12),
  3327  			Protocol: ptr.To(v1.ProtocolUDP),
  3328  		}}
  3329  	}
  3330  	subset3 := func(eps *discovery.EndpointSlice) {
  3331  		eps.AddressType = discovery.AddressTypeIPv4
  3332  		eps.Endpoints = []discovery.Endpoint{{
  3333  			Addresses: []string{"10.1.1.3"},
  3334  		}}
  3335  		eps.Ports = []discovery.EndpointPort{{
  3336  			Name:     ptr.To("p13"),
  3337  			Port:     ptr.To[int32](13),
  3338  			Protocol: ptr.To(v1.ProtocolUDP),
  3339  		}}
  3340  	}
  3341  	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
  3342  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
  3343  		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
  3344  	}
  3345  	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
  3346  		eps.AddressType = discovery.AddressTypeIPv4
  3347  		eps.Endpoints = []discovery.Endpoint{{
  3348  			Addresses: []string{"10.1.1.1"},
  3349  		}, {
  3350  			Addresses: []string{"10.1.1.2"},
  3351  			NodeName:  ptr.To(testHostname),
  3352  		}}
  3353  		eps.Ports = []discovery.EndpointPort{{
  3354  			Name:     ptr.To("p11"),
  3355  			Port:     ptr.To[int32](11),
  3356  			Protocol: ptr.To(v1.ProtocolUDP),
  3357  		}, {
  3358  			Name:     ptr.To("p12"),
  3359  			Port:     ptr.To[int32](12),
  3360  			Protocol: ptr.To(v1.ProtocolUDP),
  3361  		}}
  3362  	}
  3363  	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
  3364  		eps.AddressType = discovery.AddressTypeIPv4
  3365  		eps.Endpoints = []discovery.Endpoint{{
  3366  			Addresses: []string{"10.1.1.3"},
  3367  		}, {
  3368  			Addresses: []string{"10.1.1.4"},
  3369  			NodeName:  ptr.To(testHostname),
  3370  		}}
  3371  		eps.Ports = []discovery.EndpointPort{{
  3372  			Name:     ptr.To("p13"),
  3373  			Port:     ptr.To[int32](13),
  3374  			Protocol: ptr.To(v1.ProtocolUDP),
  3375  		}, {
  3376  			Name:     ptr.To("p14"),
  3377  			Port:     ptr.To[int32](14),
  3378  			Protocol: ptr.To(v1.ProtocolUDP),
  3379  		}}
  3380  	}
  3381  	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
  3382  		eps.AddressType = discovery.AddressTypeIPv4
  3383  		eps.Endpoints = []discovery.Endpoint{{
  3384  			Addresses: []string{"10.2.2.1"},
  3385  		}, {
  3386  			Addresses: []string{"10.2.2.2"},
  3387  			NodeName:  ptr.To(testHostname),
  3388  		}}
  3389  		eps.Ports = []discovery.EndpointPort{{
  3390  			Name:     ptr.To("p21"),
  3391  			Port:     ptr.To[int32](21),
  3392  			Protocol: ptr.To(v1.ProtocolUDP),
  3393  		}, {
  3394  			Name:     ptr.To("p22"),
  3395  			Port:     ptr.To[int32](22),
  3396  			Protocol: ptr.To(v1.ProtocolUDP),
  3397  		}}
  3398  	}
  3399  	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
  3400  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
  3401  		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
  3402  		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
  3403  	}
  3404  	complexSubset1 := func(eps *discovery.EndpointSlice) {
  3405  		eps.AddressType = discovery.AddressTypeIPv4
  3406  		eps.Endpoints = []discovery.Endpoint{{
  3407  			Addresses: []string{"10.2.2.2"},
  3408  			NodeName:  ptr.To(testHostname),
  3409  		}, {
  3410  			Addresses: []string{"10.2.2.22"},
  3411  			NodeName:  ptr.To(testHostname),
  3412  		}}
  3413  		eps.Ports = []discovery.EndpointPort{{
  3414  			Name:     ptr.To("p22"),
  3415  			Port:     ptr.To[int32](22),
  3416  			Protocol: ptr.To(v1.ProtocolUDP),
  3417  		}}
  3418  	}
  3419  	complexSubset2 := func(eps *discovery.EndpointSlice) {
  3420  		eps.AddressType = discovery.AddressTypeIPv4
  3421  		eps.Endpoints = []discovery.Endpoint{{
  3422  			Addresses: []string{"10.2.2.3"},
  3423  			NodeName:  ptr.To(testHostname),
  3424  		}}
  3425  		eps.Ports = []discovery.EndpointPort{{
  3426  			Name:     ptr.To("p23"),
  3427  			Port:     ptr.To[int32](23),
  3428  			Protocol: ptr.To(v1.ProtocolUDP),
  3429  		}}
  3430  	}
  3431  	complexSubset3 := func(eps *discovery.EndpointSlice) {
  3432  		eps.AddressType = discovery.AddressTypeIPv4
  3433  		eps.Endpoints = []discovery.Endpoint{{
  3434  			Addresses: []string{"10.4.4.4"},
  3435  			NodeName:  ptr.To(testHostname),
  3436  		}, {
  3437  			Addresses: []string{"10.4.4.5"},
  3438  			NodeName:  ptr.To(testHostname),
  3439  		}}
  3440  		eps.Ports = []discovery.EndpointPort{{
  3441  			Name:     ptr.To("p44"),
  3442  			Port:     ptr.To[int32](44),
  3443  			Protocol: ptr.To(v1.ProtocolUDP),
  3444  		}}
  3445  	}
  3446  	complexSubset4 := func(eps *discovery.EndpointSlice) {
  3447  		eps.AddressType = discovery.AddressTypeIPv4
  3448  		eps.Endpoints = []discovery.Endpoint{{
  3449  			Addresses: []string{"10.4.4.6"},
  3450  			NodeName:  ptr.To(testHostname),
  3451  		}}
  3452  		eps.Ports = []discovery.EndpointPort{{
  3453  			Name:     ptr.To("p45"),
  3454  			Port:     ptr.To[int32](45),
  3455  			Protocol: ptr.To(v1.ProtocolUDP),
  3456  		}}
  3457  	}
  3458  	complexSubset5 := func(eps *discovery.EndpointSlice) {
  3459  		eps.AddressType = discovery.AddressTypeIPv4
  3460  		eps.Endpoints = []discovery.Endpoint{{
  3461  			Addresses: []string{"10.1.1.1"},
  3462  		}, {
  3463  			Addresses: []string{"10.1.1.11"},
  3464  		}}
  3465  		eps.Ports = []discovery.EndpointPort{{
  3466  			Name:     ptr.To("p11"),
  3467  			Port:     ptr.To[int32](11),
  3468  			Protocol: ptr.To(v1.ProtocolUDP),
  3469  		}}
  3470  	}
  3471  	complexSubset6 := func(eps *discovery.EndpointSlice) {
  3472  		eps.AddressType = discovery.AddressTypeIPv4
  3473  		eps.Endpoints = []discovery.Endpoint{{
  3474  			Addresses: []string{"10.1.1.2"},
  3475  		}}
  3476  		eps.Ports = []discovery.EndpointPort{{
  3477  			Name:     ptr.To("p12"),
  3478  			Port:     ptr.To[int32](12),
  3479  			Protocol: ptr.To(v1.ProtocolUDP),
  3480  		}, {
  3481  			Name:     ptr.To("p122"),
  3482  			Port:     ptr.To[int32](122),
  3483  			Protocol: ptr.To(v1.ProtocolUDP),
  3484  		}}
  3485  	}
  3486  	complexSubset7 := func(eps *discovery.EndpointSlice) {
  3487  		eps.AddressType = discovery.AddressTypeIPv4
  3488  		eps.Endpoints = []discovery.Endpoint{{
  3489  			Addresses: []string{"10.3.3.3"},
  3490  		}}
  3491  		eps.Ports = []discovery.EndpointPort{{
  3492  			Name:     ptr.To("p33"),
  3493  			Port:     ptr.To[int32](33),
  3494  			Protocol: ptr.To(v1.ProtocolUDP),
  3495  		}}
  3496  	}
  3497  	complexSubset8 := func(eps *discovery.EndpointSlice) {
  3498  		eps.AddressType = discovery.AddressTypeIPv4
  3499  		eps.Endpoints = []discovery.Endpoint{{
  3500  			Addresses: []string{"10.4.4.4"},
  3501  			NodeName:  ptr.To(testHostname),
  3502  		}}
  3503  		eps.Ports = []discovery.EndpointPort{{
  3504  			Name:     ptr.To("p44"),
  3505  			Port:     ptr.To[int32](44),
  3506  			Protocol: ptr.To(v1.ProtocolUDP),
  3507  		}}
  3508  	}
  3509  	complexBefore := []*discovery.EndpointSlice{
  3510  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3511  		nil,
  3512  		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
  3513  		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
  3514  		nil,
  3515  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
  3516  		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
  3517  	}
  3518  	complexAfter := []*discovery.EndpointSlice{
  3519  		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
  3520  		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
  3521  		nil,
  3522  		nil,
  3523  		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
  3524  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
  3525  		nil,
  3526  	}
  3527  
  3528  	testCases := []struct {
  3529  		// previousEndpoints and currentEndpoints are used to call the
  3530  		// appropriate OnEndpointSlice* handlers (based on whether corresponding
  3531  		// values are nil or non-nil) and must be of equal length.
  3532  		name                           string
  3533  		previousEndpoints              []*discovery.EndpointSlice
  3534  		currentEndpoints               []*discovery.EndpointSlice
  3535  		oldEndpoints                   map[proxy.ServicePortName][]endpointExpectation
  3536  		expectedResult                 map[proxy.ServicePortName][]endpointExpectation
  3537  		expectedDeletedUDPEndpoints    []proxy.ServiceEndpoint
  3538  		expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool
  3539  		expectedLocalEndpoints         map[types.NamespacedName]int
  3540  	}{{
  3541  		// Case[0]: nothing
  3542  		name:                           "nothing",
  3543  		oldEndpoints:                   map[proxy.ServicePortName][]endpointExpectation{},
  3544  		expectedResult:                 map[proxy.ServicePortName][]endpointExpectation{},
  3545  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3546  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3547  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3548  	}, {
  3549  		// Case[1]: no change, named port, local
  3550  		name:              "no change, named port, local",
  3551  		previousEndpoints: namedPortLocal,
  3552  		currentEndpoints:  namedPortLocal,
  3553  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3554  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3555  				{endpoint: "10.1.1.1:11", isLocal: true},
  3556  			},
  3557  		},
  3558  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3559  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3560  				{endpoint: "10.1.1.1:11", isLocal: true},
  3561  			},
  3562  		},
  3563  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3564  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3565  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3566  			makeNSN("ns1", "ep1"): 1,
  3567  		},
  3568  	}, {
  3569  		// Case[2]: no change, multiple subsets
  3570  		name:              "no change, multiple subsets",
  3571  		previousEndpoints: multipleSubsets,
  3572  		currentEndpoints:  multipleSubsets,
  3573  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3574  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3575  				{endpoint: "10.1.1.1:11", isLocal: false},
  3576  			},
  3577  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3578  				{endpoint: "10.1.1.2:12", isLocal: false},
  3579  			},
  3580  		},
  3581  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3582  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3583  				{endpoint: "10.1.1.1:11", isLocal: false},
  3584  			},
  3585  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3586  				{endpoint: "10.1.1.2:12", isLocal: false},
  3587  			},
  3588  		},
  3589  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3590  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3591  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3592  	}, {
  3593  		// Case[3]: no change, multiple subsets, multiple ports, local
  3594  		name:              "no change, multiple subsets, multiple ports, local",
  3595  		previousEndpoints: multipleSubsetsMultiplePortsLocal,
  3596  		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
  3597  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3598  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3599  				{endpoint: "10.1.1.1:11", isLocal: true},
  3600  			},
  3601  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3602  				{endpoint: "10.1.1.1:12", isLocal: true},
  3603  			},
  3604  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3605  				{endpoint: "10.1.1.3:13", isLocal: false},
  3606  			},
  3607  		},
  3608  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3609  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3610  				{endpoint: "10.1.1.1:11", isLocal: true},
  3611  			},
  3612  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3613  				{endpoint: "10.1.1.1:12", isLocal: true},
  3614  			},
  3615  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3616  				{endpoint: "10.1.1.3:13", isLocal: false},
  3617  			},
  3618  		},
  3619  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3620  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3621  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3622  			makeNSN("ns1", "ep1"): 1,
  3623  		},
  3624  	}, {
  3625  		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
  3626  		name:              "no change, multiple endpoints, subsets, IPs, and ports",
  3627  		previousEndpoints: multipleSubsetsIPsPorts,
  3628  		currentEndpoints:  multipleSubsetsIPsPorts,
  3629  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3630  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3631  				{endpoint: "10.1.1.1:11", isLocal: false},
  3632  				{endpoint: "10.1.1.2:11", isLocal: true},
  3633  			},
  3634  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3635  				{endpoint: "10.1.1.1:12", isLocal: false},
  3636  				{endpoint: "10.1.1.2:12", isLocal: true},
  3637  			},
  3638  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3639  				{endpoint: "10.1.1.3:13", isLocal: false},
  3640  				{endpoint: "10.1.1.4:13", isLocal: true},
  3641  			},
  3642  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3643  				{endpoint: "10.1.1.3:14", isLocal: false},
  3644  				{endpoint: "10.1.1.4:14", isLocal: true},
  3645  			},
  3646  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3647  				{endpoint: "10.2.2.1:21", isLocal: false},
  3648  				{endpoint: "10.2.2.2:21", isLocal: true},
  3649  			},
  3650  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3651  				{endpoint: "10.2.2.1:22", isLocal: false},
  3652  				{endpoint: "10.2.2.2:22", isLocal: true},
  3653  			},
  3654  		},
  3655  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3656  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3657  				{endpoint: "10.1.1.1:11", isLocal: false},
  3658  				{endpoint: "10.1.1.2:11", isLocal: true},
  3659  			},
  3660  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3661  				{endpoint: "10.1.1.1:12", isLocal: false},
  3662  				{endpoint: "10.1.1.2:12", isLocal: true},
  3663  			},
  3664  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3665  				{endpoint: "10.1.1.3:13", isLocal: false},
  3666  				{endpoint: "10.1.1.4:13", isLocal: true},
  3667  			},
  3668  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3669  				{endpoint: "10.1.1.3:14", isLocal: false},
  3670  				{endpoint: "10.1.1.4:14", isLocal: true},
  3671  			},
  3672  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3673  				{endpoint: "10.2.2.1:21", isLocal: false},
  3674  				{endpoint: "10.2.2.2:21", isLocal: true},
  3675  			},
  3676  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3677  				{endpoint: "10.2.2.1:22", isLocal: false},
  3678  				{endpoint: "10.2.2.2:22", isLocal: true},
  3679  			},
  3680  		},
  3681  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3682  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3683  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3684  			makeNSN("ns1", "ep1"): 2,
  3685  			makeNSN("ns2", "ep2"): 1,
  3686  		},
  3687  	}, {
  3688  		// Case[5]: add an Endpoints
  3689  		name:              "add an Endpoints",
  3690  		previousEndpoints: []*discovery.EndpointSlice{nil},
  3691  		currentEndpoints:  namedPortLocal,
  3692  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  3693  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3694  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3695  				{endpoint: "10.1.1.1:11", isLocal: true},
  3696  			},
  3697  		},
  3698  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3699  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3700  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  3701  		},
  3702  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3703  			makeNSN("ns1", "ep1"): 1,
  3704  		},
  3705  	}, {
  3706  		// Case[6]: remove an Endpoints
  3707  		name:              "remove an Endpoints",
  3708  		previousEndpoints: namedPortLocal,
  3709  		currentEndpoints:  []*discovery.EndpointSlice{nil},
  3710  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3711  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3712  				{endpoint: "10.1.1.1:11", isLocal: true},
  3713  			},
  3714  		},
  3715  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{},
  3716  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3717  			Endpoint:        "10.1.1.1:11",
  3718  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3719  		}},
  3720  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3721  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3722  	}, {
  3723  		// Case[7]: add an IP and port
  3724  		name:              "add an IP and port",
  3725  		previousEndpoints: namedPort,
  3726  		currentEndpoints:  namedPortsLocalNoLocal,
  3727  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3728  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3729  				{endpoint: "10.1.1.1:11", isLocal: false},
  3730  			},
  3731  		},
  3732  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3733  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3734  				{endpoint: "10.1.1.1:11", isLocal: false},
  3735  				{endpoint: "10.1.1.2:11", isLocal: true},
  3736  			},
  3737  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3738  				{endpoint: "10.1.1.1:12", isLocal: false},
  3739  				{endpoint: "10.1.1.2:12", isLocal: true},
  3740  			},
  3741  		},
  3742  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3743  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3744  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3745  		},
  3746  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3747  			makeNSN("ns1", "ep1"): 1,
  3748  		},
  3749  	}, {
  3750  		// Case[8]: remove an IP and port
  3751  		name:              "remove an IP and port",
  3752  		previousEndpoints: namedPortsLocalNoLocal,
  3753  		currentEndpoints:  namedPort,
  3754  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3755  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3756  				{endpoint: "10.1.1.1:11", isLocal: false},
  3757  				{endpoint: "10.1.1.2:11", isLocal: true},
  3758  			},
  3759  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3760  				{endpoint: "10.1.1.1:12", isLocal: false},
  3761  				{endpoint: "10.1.1.2:12", isLocal: true},
  3762  			},
  3763  		},
  3764  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3765  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3766  				{endpoint: "10.1.1.1:11", isLocal: false},
  3767  			},
  3768  		},
  3769  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3770  			Endpoint:        "10.1.1.2:11",
  3771  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3772  		}, {
  3773  			Endpoint:        "10.1.1.1:12",
  3774  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3775  		}, {
  3776  			Endpoint:        "10.1.1.2:12",
  3777  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3778  		}},
  3779  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3780  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3781  	}, {
  3782  		// Case[9]: add a subset
  3783  		name:              "add a subset",
  3784  		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
  3785  		currentEndpoints:  multipleSubsetsWithLocal,
  3786  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3787  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3788  				{endpoint: "10.1.1.1:11", isLocal: false},
  3789  			},
  3790  		},
  3791  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3792  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3793  				{endpoint: "10.1.1.1:11", isLocal: false},
  3794  			},
  3795  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3796  				{endpoint: "10.1.1.2:12", isLocal: true},
  3797  			},
  3798  		},
  3799  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3800  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3801  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3802  		},
  3803  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3804  			makeNSN("ns1", "ep1"): 1,
  3805  		},
  3806  	}, {
  3807  		// Case[10]: remove a subset
  3808  		name:              "remove a subset",
  3809  		previousEndpoints: multipleSubsets,
  3810  		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
  3811  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3812  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3813  				{endpoint: "10.1.1.1:11", isLocal: false},
  3814  			},
  3815  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3816  				{endpoint: "10.1.1.2:12", isLocal: false},
  3817  			},
  3818  		},
  3819  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3820  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3821  				{endpoint: "10.1.1.1:11", isLocal: false},
  3822  			},
  3823  		},
  3824  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3825  			Endpoint:        "10.1.1.2:12",
  3826  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3827  		}},
  3828  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3829  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3830  	}, {
  3831  		// Case[11]: rename a port
  3832  		name:              "rename a port",
  3833  		previousEndpoints: namedPort,
  3834  		currentEndpoints:  namedPortRenamed,
  3835  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3836  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3837  				{endpoint: "10.1.1.1:11", isLocal: false},
  3838  			},
  3839  		},
  3840  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3841  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
  3842  				{endpoint: "10.1.1.1:11", isLocal: false},
  3843  			},
  3844  		},
  3845  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3846  			Endpoint:        "10.1.1.1:11",
  3847  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3848  		}},
  3849  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3850  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
  3851  		},
  3852  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  3853  	}, {
  3854  		// Case[12]: renumber a port
  3855  		name:              "renumber a port",
  3856  		previousEndpoints: namedPort,
  3857  		currentEndpoints:  namedPortRenumbered,
  3858  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3859  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3860  				{endpoint: "10.1.1.1:11", isLocal: false},
  3861  			},
  3862  		},
  3863  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3864  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3865  				{endpoint: "10.1.1.1:22", isLocal: false},
  3866  			},
  3867  		},
  3868  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3869  			Endpoint:        "10.1.1.1:11",
  3870  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3871  		}},
  3872  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3873  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3874  	}, {
  3875  		// Case[13]: complex add and remove
  3876  		name:              "complex add and remove",
  3877  		previousEndpoints: complexBefore,
  3878  		currentEndpoints:  complexAfter,
  3879  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3880  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3881  				{endpoint: "10.1.1.1:11", isLocal: false},
  3882  			},
  3883  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3884  				{endpoint: "10.2.2.22:22", isLocal: true},
  3885  				{endpoint: "10.2.2.2:22", isLocal: true},
  3886  			},
  3887  			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
  3888  				{endpoint: "10.2.2.3:23", isLocal: true},
  3889  			},
  3890  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  3891  				{endpoint: "10.4.4.4:44", isLocal: true},
  3892  				{endpoint: "10.4.4.5:44", isLocal: true},
  3893  			},
  3894  			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
  3895  				{endpoint: "10.4.4.6:45", isLocal: true},
  3896  			},
  3897  		},
  3898  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3899  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3900  				{endpoint: "10.1.1.11:11", isLocal: false},
  3901  				{endpoint: "10.1.1.1:11", isLocal: false},
  3902  			},
  3903  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3904  				{endpoint: "10.1.1.2:12", isLocal: false},
  3905  			},
  3906  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
  3907  				{endpoint: "10.1.1.2:122", isLocal: false},
  3908  			},
  3909  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
  3910  				{endpoint: "10.3.3.3:33", isLocal: false},
  3911  			},
  3912  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  3913  				{endpoint: "10.4.4.4:44", isLocal: true},
  3914  			},
  3915  		},
  3916  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3917  			Endpoint:        "10.2.2.2:22",
  3918  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  3919  		}, {
  3920  			Endpoint:        "10.2.2.22:22",
  3921  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  3922  		}, {
  3923  			Endpoint:        "10.2.2.3:23",
  3924  			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
  3925  		}, {
  3926  			Endpoint:        "10.4.4.5:44",
  3927  			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
  3928  		}, {
  3929  			Endpoint:        "10.4.4.6:45",
  3930  			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
  3931  		}},
  3932  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3933  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
  3934  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
  3935  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
  3936  		},
  3937  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3938  			makeNSN("ns4", "ep4"): 1,
  3939  		},
  3940  	}, {
  3941  		// Case[14]: change from 0 endpoint addresses to 1 named port
  3942  		name:              "change from 0 endpoint addresses to 1 named port",
  3943  		previousEndpoints: emptyEndpointSlices,
  3944  		currentEndpoints:  namedPort,
  3945  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  3946  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3947  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3948  				{endpoint: "10.1.1.1:11", isLocal: false},
  3949  			},
  3950  		},
  3951  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3952  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3953  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  3954  		},
  3955  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  3956  	},
  3957  	}
  3958  
  3959  	for tci, tc := range testCases {
  3960  		t.Run(tc.name, func(t *testing.T) {
  3961  			ipt := iptablestest.NewFake()
  3962  			fp := NewFakeProxier(ipt)
  3963  			fp.hostname = testHostname
  3964  
  3965  			// First check that, after adding all previous versions of the endpoints,
  3966  			// fp.endpointsMap matches tc.oldEndpoints.
  3967  			for i := range tc.previousEndpoints {
  3968  				if tc.previousEndpoints[i] != nil {
  3969  					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
  3970  				}
  3971  			}
  3972  			fp.endpointsMap.Update(fp.endpointsChanges)
  3973  			checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints)
  3974  
  3975  			// Now invoke the appropriate handlers to reach the desired end state.
  3976  			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
  3977  				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
  3978  			}
  3979  
  3980  			for i := range tc.previousEndpoints {
  3981  				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
  3982  				switch {
  3983  				case prev == nil:
  3984  					fp.OnEndpointSliceAdd(curr)
  3985  				case curr == nil:
  3986  					fp.OnEndpointSliceDelete(prev)
  3987  				default:
  3988  					fp.OnEndpointSliceUpdate(prev, curr)
  3989  				}
  3990  			}
  3991  			result := fp.endpointsMap.Update(fp.endpointsChanges)
  3992  			newMap := fp.endpointsMap
  3993  			checkEndpointExpectations(t, tci, newMap, tc.expectedResult)
  3994  			if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) {
  3995  				t.Errorf("[%d] expected %d deleted UDP endpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints)
  3996  			}
  3997  			for _, x := range tc.expectedDeletedUDPEndpoints {
  3998  				found := false
  3999  				for _, deleted := range result.DeletedUDPEndpoints {
  4000  					if deleted == x {
  4001  						found = true
  4002  						break
  4003  					}
  4004  				}
  4005  				if !found {
  4006  					t.Errorf("[%d] expected deleted UDP endpoint %v, but didn't find it: %v", tci, x, result.DeletedUDPEndpoints)
  4007  				}
  4008  			}
  4009  			if len(result.NewlyActiveUDPServices) != len(tc.expectedNewlyActiveUDPServices) {
  4010  				t.Errorf("[%d] expected %d newly-active UDP services, got %d: %v", tci, len(tc.expectedNewlyActiveUDPServices), len(result.NewlyActiveUDPServices), result.NewlyActiveUDPServices)
  4011  			}
  4012  			for svcName := range tc.expectedNewlyActiveUDPServices {
  4013  				found := false
  4014  				for _, active := range result.NewlyActiveUDPServices {
  4015  					if active == svcName {
  4016  						found = true
  4017  					}
  4018  				}
  4019  				if !found {
  4020  					t.Errorf("[%d] expected newly-active UDP service %v, but didn't find it: %v", tci, svcName, result.NewlyActiveUDPServices)
  4021  				}
  4022  			}
  4023  			localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4024  			if !reflect.DeepEqual(localReadyEndpoints, tc.expectedLocalEndpoints) {
  4025  				t.Errorf("[%d] expected local endpoints %v, got %v", tci, tc.expectedLocalEndpoints, localReadyEndpoints)
  4026  			}
  4027  		})
  4028  	}
  4029  }
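
// checkContainsEndpoints is a hedged sketch (a hypothetical helper, not
// called anywhere in this file) of an alternative to the O(n^2) membership
// loops in the test above: it collects the actual results into a set so each
// expectation becomes a single lookup. It assumes only the generic Set type
// from k8s.io/apimachinery/pkg/util/sets, which is already imported.
func checkContainsEndpoints(t *testing.T, got, want []proxy.ServiceEndpoint) {
	t.Helper()
	gotSet := sets.New(got...)
	for _, w := range want {
		if !gotSet.Has(w) {
			t.Errorf("expected deleted UDP endpoint %v, but didn't find it: %v", w, got)
		}
	}
}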
  4030  
  4031  // TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
  4032  func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
  4033  	ipt := iptablestest.NewFake()
  4034  	fp := NewFakeProxier(ipt)
  4035  	fp.OnServiceSynced()
  4036  	fp.OnEndpointSlicesSynced()
  4037  
  4038  	serviceName := "svc1"
  4039  	namespaceName := "ns1"
  4040  
  4041  	fp.OnServiceAdd(&v1.Service{
  4042  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4043  		Spec: v1.ServiceSpec{
  4044  			ClusterIP: "172.30.1.1",
  4045  			Selector:  map[string]string{"foo": "bar"},
  4046  			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
  4047  		},
  4048  	})
  4049  
  4050  	endpointSlice := &discovery.EndpointSlice{
  4051  		ObjectMeta: metav1.ObjectMeta{
  4052  			Name:      fmt.Sprintf("%s-1", serviceName),
  4053  			Namespace: namespaceName,
  4054  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4055  		},
  4056  		Ports: []discovery.EndpointPort{{
  4057  			Name:     ptr.To(""),
  4058  			Port:     ptr.To[int32](80),
  4059  			Protocol: ptr.To(v1.ProtocolTCP),
  4060  		}},
  4061  		AddressType: discovery.AddressTypeIPv4,
  4062  		Endpoints: []discovery.Endpoint{{
  4063  			Addresses:  []string{"10.0.1.1"},
  4064  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4065  			NodeName:   ptr.To(testHostname),
  4066  		}, {
  4067  			Addresses:  []string{"10.0.1.2"},
  4068  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4069  			NodeName:   ptr.To(testHostname),
  4070  		}, {
  4071  			Addresses:  []string{"10.0.1.3"},
  4072  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4073  			NodeName:   ptr.To(testHostname),
  4074  		}, { // not ready endpoints should be ignored
  4075  			Addresses:  []string{"10.0.1.4"},
  4076  			Conditions: discovery.EndpointConditions{Ready: ptr.To(false)},
  4077  			NodeName:   ptr.To(testHostname),
  4078  		}},
  4079  	}
  4080  
  4081  	fp.OnEndpointSliceAdd(endpointSlice)
  4082  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  4083  	localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4084  	if len(localReadyEndpoints) != 1 {
  4085  		t.Errorf("unexpected number of local ready endpoints, expected 1 but got: %d", len(localReadyEndpoints))
  4086  	}
  4087  
  4088  	// mark every endpoint not-ready; all but the first are also terminating
  4089  	endpointSliceTerminating := &discovery.EndpointSlice{
  4090  		ObjectMeta: metav1.ObjectMeta{
  4091  			Name:      fmt.Sprintf("%s-1", serviceName),
  4092  			Namespace: namespaceName,
  4093  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4094  		},
  4095  		Ports: []discovery.EndpointPort{{
  4096  			Name:     ptr.To(""),
  4097  			Port:     ptr.To[int32](80),
  4098  			Protocol: ptr.To(v1.ProtocolTCP),
  4099  		}},
  4100  		AddressType: discovery.AddressTypeIPv4,
  4101  		Endpoints: []discovery.Endpoint{{
  4102  			Addresses: []string{"10.0.1.1"},
  4103  			Conditions: discovery.EndpointConditions{
  4104  				Ready:       ptr.To(false),
  4105  				Serving:     ptr.To(true),
  4106  				Terminating: ptr.To(false),
  4107  			},
  4108  			NodeName: ptr.To(testHostname),
  4109  		}, {
  4110  			Addresses: []string{"10.0.1.2"},
  4111  			Conditions: discovery.EndpointConditions{
  4112  				Ready:       ptr.To(false),
  4113  				Serving:     ptr.To(true),
  4114  				Terminating: ptr.To(true),
  4115  			},
  4116  			NodeName: ptr.To(testHostname),
  4117  		}, {
  4118  			Addresses: []string{"10.0.1.3"},
  4119  			Conditions: discovery.EndpointConditions{
  4120  				Ready:       ptr.To(false),
  4121  				Serving:     ptr.To(true),
  4122  				Terminating: ptr.To(true),
  4123  			},
  4124  			NodeName: ptr.To(testHostname),
  4125  		}, { // neither ready nor serving, so always ignored
  4126  			Addresses: []string{"10.0.1.4"},
  4127  			Conditions: discovery.EndpointConditions{
  4128  				Ready:       ptr.To(false),
  4129  				Serving:     ptr.To(false),
  4130  				Terminating: ptr.To(true),
  4131  			},
  4132  			NodeName: ptr.To(testHostname),
  4133  		}},
  4134  	}
  4135  
  4136  	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
  4137  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  4138  	localReadyEndpoints = fp.endpointsMap.LocalReadyEndpoints()
  4139  	if len(localReadyEndpoints) != 0 {
  4140  		t.Errorf("unexpected number of local ready endpoints, expected 0 but got: %d", len(localReadyEndpoints))
  4141  	}
  4142  }
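
// makeConditionedEndpoint is a hypothetical convenience sketch (the helper
// name and shape are assumptions, not an existing API in this file) for the
// discovery.Endpoint literals repeated throughout these tests. For example,
// makeConditionedEndpoint("10.0.1.2", testHostname, false, true, true)
// reproduces the serving-but-terminating endpoint above.
func makeConditionedEndpoint(ip, nodeName string, ready, serving, terminating bool) discovery.Endpoint {
	return discovery.Endpoint{
		Addresses: []string{ip},
		Conditions: discovery.EndpointConditions{
			Ready:       ptr.To(ready),
			Serving:     ptr.To(serving),
			Terminating: ptr.To(terminating),
		},
		NodeName: ptr.To(nodeName),
	}
}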
  4143  
  4144  func TestProxierMetricsIptablesTotalRules(t *testing.T) {
  4145  	ipt := iptablestest.NewFake()
  4146  	fp := NewFakeProxier(ipt)
  4147  
  4148  	metrics.RegisterMetrics()
  4149  
  4150  	svcIP := "172.30.0.41"
  4151  	svcPort := 80
  4152  	nodePort := 31201
  4153  	svcPortName := proxy.ServicePortName{
  4154  		NamespacedName: makeNSN("ns1", "svc1"),
  4155  		Port:           "p80",
  4156  		Protocol:       v1.ProtocolTCP,
  4157  	}
  4158  
  4159  	makeServiceMap(fp,
  4160  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4161  			svc.Spec.ClusterIP = svcIP
  4162  			svc.Spec.Ports = []v1.ServicePort{{
  4163  				Name:     svcPortName.Port,
  4164  				Port:     int32(svcPort),
  4165  				Protocol: v1.ProtocolTCP,
  4166  				NodePort: int32(nodePort),
  4167  			}}
  4168  		}),
  4169  	)
  4170  	fp.syncProxyRules()
  4171  	iptablesData := fp.iptablesData.String()
  4172  
  4173  	nFilterRules := countRulesFromMetric(utiliptables.TableFilter)
  4174  	expectedFilterRules := countRules(utiliptables.TableFilter, iptablesData)
  4175  
  4176  	if nFilterRules != expectedFilterRules {
  4177  		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4178  	}
  4179  
  4180  	nNatRules := countRulesFromMetric(utiliptables.TableNAT)
  4181  	expectedNatRules := countRules(utiliptables.TableNAT, iptablesData)
  4182  
  4183  	if nNatRules != expectedNatRules {
  4184  		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4185  	}
  4186  
  4187  	populateEndpointSlices(fp,
  4188  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  4189  			eps.AddressType = discovery.AddressTypeIPv4
  4190  			eps.Endpoints = []discovery.Endpoint{{
  4191  				Addresses: []string{"10.0.0.2"},
  4192  			}, {
  4193  				Addresses: []string{"10.0.0.5"},
  4194  			}}
  4195  			eps.Ports = []discovery.EndpointPort{{
  4196  				Name:     ptr.To(svcPortName.Port),
  4197  				Port:     ptr.To(int32(svcPort)),
  4198  				Protocol: ptr.To(v1.ProtocolTCP),
  4199  			}}
  4200  		}),
  4201  	)
  4202  
  4203  	fp.syncProxyRules()
  4204  	iptablesData = fp.iptablesData.String()
  4205  
  4206  	nFilterRules = countRulesFromMetric(utiliptables.TableFilter)
  4207  	expectedFilterRules = countRules(utiliptables.TableFilter, iptablesData)
  4208  
  4209  	if nFilterRules != expectedFilterRules {
  4210  		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4211  	}
  4212  
  4213  	nNatRules = countRulesFromMetric(utiliptables.TableNAT)
  4214  	expectedNatRules = countRules(utiliptables.TableNAT, iptablesData)
  4215  
  4216  	if nNatRules != expectedNatRules {
  4217  		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4218  	}
  4219  }
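
// readRulesGauge is a hedged sketch of the lookup that countRulesFromMetric
// (defined elsewhere in this file) performs: reading the per-table iptables
// rule-count gauge. It assumes metrics.IptablesRulesTotal is a GaugeVec
// labeled by table name, as the calls above imply.
func readRulesGauge(t *testing.T, table utiliptables.Table) int {
	t.Helper()
	v, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(table)))
	if err != nil {
		t.Fatalf("reading iptables rules gauge for table %q: %v", table, err)
	}
	return int(v)
}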
  4220  
  4221  // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
  4222  
  4223  // This test ensures that the iptables proxier supports translating EndpointSlices to
  4224  // iptables output when internalTrafficPolicy is specified
  4225  func TestInternalTrafficPolicy(t *testing.T) {
  4226  	type endpoint struct {
  4227  		ip       string
  4228  		hostname string
  4229  	}
  4230  
  4231  	testCases := []struct {
  4232  		name                  string
  4233  		line                  int
  4234  		internalTrafficPolicy *v1.ServiceInternalTrafficPolicy
  4235  		endpoints             []endpoint
  4236  		flowTests             []packetFlowTest
  4237  	}{
  4238  		{
  4239  			name:                  "internalTrafficPolicy is cluster",
  4240  			line:                  getLine(),
  4241  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster),
  4242  			endpoints: []endpoint{
  4243  				{"10.0.1.1", testHostname},
  4244  				{"10.0.1.2", "host1"},
  4245  				{"10.0.1.3", "host2"},
  4246  			},
  4247  			flowTests: []packetFlowTest{
  4248  				{
  4249  					name:     "pod to ClusterIP hits all endpoints",
  4250  					sourceIP: "10.0.0.2",
  4251  					destIP:   "172.30.1.1",
  4252  					destPort: 80,
  4253  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
  4254  					masq:     false,
  4255  				},
  4256  			},
  4257  		},
  4258  		{
  4259  			name:                  "internalTrafficPolicy is local and there is one local endpoint",
  4260  			line:                  getLine(),
  4261  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4262  			endpoints: []endpoint{
  4263  				{"10.0.1.1", testHostname},
  4264  				{"10.0.1.2", "host1"},
  4265  				{"10.0.1.3", "host2"},
  4266  			},
  4267  			flowTests: []packetFlowTest{
  4268  				{
  4269  					name:     "pod to ClusterIP hits only local endpoint",
  4270  					sourceIP: "10.0.0.2",
  4271  					destIP:   "172.30.1.1",
  4272  					destPort: 80,
  4273  					output:   "10.0.1.1:80",
  4274  					masq:     false,
  4275  				},
  4276  			},
  4277  		},
  4278  		{
  4279  			name:                  "internalTrafficPolicy is local and there are multiple local endpoints",
  4280  			line:                  getLine(),
  4281  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4282  			endpoints: []endpoint{
  4283  				{"10.0.1.1", testHostname},
  4284  				{"10.0.1.2", testHostname},
  4285  				{"10.0.1.3", "host2"},
  4286  			},
  4287  			flowTests: []packetFlowTest{
  4288  				{
  4289  					name:     "pod to ClusterIP hits all local endpoints",
  4290  					sourceIP: "10.0.0.2",
  4291  					destIP:   "172.30.1.1",
  4292  					destPort: 80,
  4293  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4294  					masq:     false,
  4295  				},
  4296  			},
  4297  		},
  4298  		{
  4299  			name:                  "internalTrafficPolicy is local and there are no local endpoints",
  4300  			line:                  getLine(),
  4301  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4302  			endpoints: []endpoint{
  4303  				{"10.0.1.1", "host0"},
  4304  				{"10.0.1.2", "host1"},
  4305  				{"10.0.1.3", "host2"},
  4306  			},
  4307  			flowTests: []packetFlowTest{
  4308  				{
  4309  					name:     "no endpoints",
  4310  					sourceIP: "10.0.0.2",
  4311  					destIP:   "172.30.1.1",
  4312  					destPort: 80,
  4313  					output:   "DROP",
  4314  				},
  4315  			},
  4316  		},
  4317  	}
  4318  
  4319  	for _, tc := range testCases {
  4320  		t.Run(tc.name, func(t *testing.T) {
  4321  			ipt := iptablestest.NewFake()
  4322  			fp := NewFakeProxier(ipt)
  4323  			fp.OnServiceSynced()
  4324  			fp.OnEndpointSlicesSynced()
  4325  
  4326  			serviceName := "svc1"
  4327  			namespaceName := "ns1"
  4328  
  4329  			svc := &v1.Service{
  4330  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4331  				Spec: v1.ServiceSpec{
  4332  					ClusterIP: "172.30.1.1",
  4333  					Selector:  map[string]string{"foo": "bar"},
  4334  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
  4335  				},
  4336  			}
  4337  			if tc.internalTrafficPolicy != nil {
  4338  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  4339  			}
  4340  
  4341  			fp.OnServiceAdd(svc)
  4342  
  4343  			endpointSlice := &discovery.EndpointSlice{
  4344  				ObjectMeta: metav1.ObjectMeta{
  4345  					Name:      fmt.Sprintf("%s-1", serviceName),
  4346  					Namespace: namespaceName,
  4347  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4348  				},
  4349  				Ports: []discovery.EndpointPort{{
  4350  					Name:     ptr.To(""),
  4351  					Port:     ptr.To[int32](80),
  4352  					Protocol: ptr.To(v1.ProtocolTCP),
  4353  				}},
  4354  				AddressType: discovery.AddressTypeIPv4,
  4355  			}
  4356  			for _, ep := range tc.endpoints {
  4357  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  4358  					Addresses:  []string{ep.ip},
  4359  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4360  					NodeName:   ptr.To(ep.hostname),
  4361  				})
  4362  			}
  4363  
  4364  			fp.OnEndpointSliceAdd(endpointSlice)
  4365  			fp.syncProxyRules()
  4366  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tc.flowTests)
  4367  
  4368  			fp.OnEndpointSliceDelete(endpointSlice)
  4369  			fp.syncProxyRules()
  4370  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, []packetFlowTest{
  4371  				{
  4372  					name:     "endpoints deleted",
  4373  					sourceIP: "10.0.0.2",
  4374  					destIP:   "172.30.1.1",
  4375  					destPort: 80,
  4376  					output:   "REJECT",
  4377  				},
  4378  			})
  4379  		})
  4380  	}
  4381  }
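
// makeInternalTrafficPolicyService is a hypothetical sketch of the Service
// literal built inline in TestInternalTrafficPolicy above; it exists only to
// show which field the test varies (the policy pointer) versus holds fixed.
func makeInternalTrafficPolicyService(policy *v1.ServiceInternalTrafficPolicy) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
		},
	}
	if policy != nil {
		svc.Spec.InternalTrafficPolicy = policy
	}
	return svc
}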
  4382  
  4383  // TestTerminatingEndpointsTrafficPolicyLocal tests that when a service has both local ready
  4384  // endpoints and local serving-but-terminating endpoints, only the ready endpoints are used.
  4385  func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
  4386  	service := &v1.Service{
  4387  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  4388  		Spec: v1.ServiceSpec{
  4389  			ClusterIP:             "172.30.1.1",
  4390  			Type:                  v1.ServiceTypeLoadBalancer,
  4391  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4392  			Ports: []v1.ServicePort{
  4393  				{
  4394  					Name:       "",
  4395  					TargetPort: intstr.FromInt32(80),
  4396  					Port:       80,
  4397  					Protocol:   v1.ProtocolTCP,
  4398  				},
  4399  			},
  4400  			HealthCheckNodePort: 30000,
  4401  		},
  4402  		Status: v1.ServiceStatus{
  4403  			LoadBalancer: v1.LoadBalancerStatus{
  4404  				Ingress: []v1.LoadBalancerIngress{
  4405  					{IP: "1.2.3.4"},
  4406  				},
  4407  			},
  4408  		},
  4409  	}
  4410  
  4411  	testcases := []struct {
  4412  		name          string
  4413  		line          int
  4414  		endpointslice *discovery.EndpointSlice
  4415  		flowTests     []packetFlowTest
  4416  	}{
  4417  		{
  4418  			name: "ready endpoints exist",
  4419  			line: getLine(),
  4420  			endpointslice: &discovery.EndpointSlice{
  4421  				ObjectMeta: metav1.ObjectMeta{
  4422  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4423  					Namespace: "ns1",
  4424  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4425  				},
  4426  				Ports: []discovery.EndpointPort{{
  4427  					Name:     ptr.To(""),
  4428  					Port:     ptr.To[int32](80),
  4429  					Protocol: ptr.To(v1.ProtocolTCP),
  4430  				}},
  4431  				AddressType: discovery.AddressTypeIPv4,
  4432  				Endpoints: []discovery.Endpoint{
  4433  					{
  4434  						Addresses: []string{"10.0.1.1"},
  4435  						Conditions: discovery.EndpointConditions{
  4436  							Ready:       ptr.To(true),
  4437  							Serving:     ptr.To(true),
  4438  							Terminating: ptr.To(false),
  4439  						},
  4440  						NodeName: ptr.To(testHostname),
  4441  					},
  4442  					{
  4443  						Addresses: []string{"10.0.1.2"},
  4444  						Conditions: discovery.EndpointConditions{
  4445  							Ready:       ptr.To(true),
  4446  							Serving:     ptr.To(true),
  4447  							Terminating: ptr.To(false),
  4448  						},
  4449  						NodeName: ptr.To(testHostname),
  4450  					},
  4451  					{
  4452  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  4453  						Addresses: []string{"10.0.1.3"},
  4454  						Conditions: discovery.EndpointConditions{
  4455  							Ready:       ptr.To(false),
  4456  							Serving:     ptr.To(true),
  4457  							Terminating: ptr.To(true),
  4458  						},
  4459  						NodeName: ptr.To(testHostname),
  4460  					},
  4461  					{
  4462  						// this endpoint should always be ignored since it is neither ready nor serving
  4463  						Addresses: []string{"10.0.1.4"},
  4464  						Conditions: discovery.EndpointConditions{
  4465  							Ready:       ptr.To(false),
  4466  							Serving:     ptr.To(false),
  4467  							Terminating: ptr.To(true),
  4468  						},
  4469  						NodeName: ptr.To(testHostname),
  4470  					},
  4471  					{
  4472  						// this endpoint should be ignored for external since it's not local
  4473  						Addresses: []string{"10.0.1.5"},
  4474  						Conditions: discovery.EndpointConditions{
  4475  							Ready:       ptr.To(true),
  4476  							Serving:     ptr.To(true),
  4477  							Terminating: ptr.To(false),
  4478  						},
  4479  						NodeName: ptr.To("host-1"),
  4480  					},
  4481  				},
  4482  			},
  4483  			flowTests: []packetFlowTest{
  4484  				{
  4485  					name:     "pod to clusterIP",
  4486  					sourceIP: "10.0.0.2",
  4487  					destIP:   "172.30.1.1",
  4488  					destPort: 80,
  4489  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4490  					masq:     false,
  4491  				},
  4492  				{
  4493  					name:     "external to LB",
  4494  					sourceIP: testExternalClient,
  4495  					destIP:   "1.2.3.4",
  4496  					destPort: 80,
  4497  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4498  					masq:     false,
  4499  				},
  4500  			},
  4501  		},
  4502  		{
  4503  			name: "only terminating endpoints exist",
  4504  			line: getLine(),
  4505  			endpointslice: &discovery.EndpointSlice{
  4506  				ObjectMeta: metav1.ObjectMeta{
  4507  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4508  					Namespace: "ns1",
  4509  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4510  				},
  4511  				Ports: []discovery.EndpointPort{{
  4512  					Name:     ptr.To(""),
  4513  					Port:     ptr.To[int32](80),
  4514  					Protocol: ptr.To(v1.ProtocolTCP),
  4515  				}},
  4516  				AddressType: discovery.AddressTypeIPv4,
  4517  				Endpoints: []discovery.Endpoint{
  4518  					{
  4519  						// this endpoint should be used since there are only ready terminating endpoints
  4520  						Addresses: []string{"10.0.1.2"},
  4521  						Conditions: discovery.EndpointConditions{
  4522  							Ready:       ptr.To(false),
  4523  							Serving:     ptr.To(true),
  4524  							Terminating: ptr.To(true),
  4525  						},
  4526  						NodeName: ptr.To(testHostname),
  4527  					},
  4528  					{
  4529  						// this endpoint should be used since there are only ready terminating endpoints
  4530  						Addresses: []string{"10.0.1.3"},
  4531  						Conditions: discovery.EndpointConditions{
  4532  							Ready:       ptr.To(false),
  4533  							Serving:     ptr.To(true),
  4534  							Terminating: ptr.To(true),
  4535  						},
  4536  						NodeName: ptr.To(testHostname),
  4537  					},
  4538  					{
  4539  						// this endpoint should not be used since it is terminating and no longer serving.
  4540  						Addresses: []string{"10.0.1.4"},
  4541  						Conditions: discovery.EndpointConditions{
  4542  							Ready:       ptr.To(false),
  4543  							Serving:     ptr.To(false),
  4544  							Terminating: ptr.To(true),
  4545  						},
  4546  						NodeName: ptr.To(testHostname),
  4547  					},
  4548  					{
  4549  						// this endpoint should be ignored for external since it's not local
  4550  						Addresses: []string{"10.0.1.5"},
  4551  						Conditions: discovery.EndpointConditions{
  4552  							Ready:       ptr.To(true),
  4553  							Serving:     ptr.To(true),
  4554  							Terminating: ptr.To(false),
  4555  						},
  4556  						NodeName: ptr.To("host-1"),
  4557  					},
  4558  				},
  4559  			},
  4560  			flowTests: []packetFlowTest{
  4561  				{
  4562  					name:     "pod to clusterIP",
  4563  					sourceIP: "10.0.0.2",
  4564  					destIP:   "172.30.1.1",
  4565  					destPort: 80,
  4566  					output:   "10.0.1.5:80",
  4567  					masq:     false,
  4568  				},
  4569  				{
  4570  					name:     "external to LB",
  4571  					sourceIP: testExternalClient,
  4572  					destIP:   "1.2.3.4",
  4573  					destPort: 80,
  4574  					output:   "10.0.1.2:80, 10.0.1.3:80",
  4575  					masq:     false,
  4576  				},
  4577  			},
  4578  		},
  4579  		{
  4580  			name: "terminating endpoints on remote node",
  4581  			line: getLine(),
  4582  			endpointslice: &discovery.EndpointSlice{
  4583  				ObjectMeta: metav1.ObjectMeta{
  4584  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4585  					Namespace: "ns1",
  4586  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4587  				},
  4588  				Ports: []discovery.EndpointPort{{
  4589  					Name:     ptr.To(""),
  4590  					Port:     ptr.To[int32](80),
  4591  					Protocol: ptr.To(v1.ProtocolTCP),
  4592  				}},
  4593  				AddressType: discovery.AddressTypeIPv4,
  4594  				Endpoints: []discovery.Endpoint{
  4595  					{
  4596  						// this endpoint won't be used because it's not local,
  4597  						// but it will prevent a REJECT rule from being created
  4598  						Addresses: []string{"10.0.1.5"},
  4599  						Conditions: discovery.EndpointConditions{
  4600  							Ready:       ptr.To(false),
  4601  							Serving:     ptr.To(true),
  4602  							Terminating: ptr.To(true),
  4603  						},
  4604  						NodeName: ptr.To("host-1"),
  4605  					},
  4606  				},
  4607  			},
  4608  			flowTests: []packetFlowTest{
  4609  				{
  4610  					name:     "pod to clusterIP",
  4611  					sourceIP: "10.0.0.2",
  4612  					destIP:   "172.30.1.1",
  4613  					destPort: 80,
  4614  					output:   "10.0.1.5:80",
  4615  				},
  4616  				{
  4617  					name:     "external to LB, no locally-usable endpoints",
  4618  					sourceIP: testExternalClient,
  4619  					destIP:   "1.2.3.4",
  4620  					destPort: 80,
  4621  					output:   "DROP",
  4622  				},
  4623  			},
  4624  		},
  4625  		{
  4626  			name: "no usable endpoints on any node",
  4627  			line: getLine(),
  4628  			endpointslice: &discovery.EndpointSlice{
  4629  				ObjectMeta: metav1.ObjectMeta{
  4630  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4631  					Namespace: "ns1",
  4632  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4633  				},
  4634  				Ports: []discovery.EndpointPort{{
  4635  					Name:     ptr.To(""),
  4636  					Port:     ptr.To[int32](80),
  4637  					Protocol: ptr.To(v1.ProtocolTCP),
  4638  				}},
  4639  				AddressType: discovery.AddressTypeIPv4,
  4640  				Endpoints: []discovery.Endpoint{
  4641  					{
  4642  						// Local but not ready or serving
  4643  						Addresses: []string{"10.0.1.5"},
  4644  						Conditions: discovery.EndpointConditions{
  4645  							Ready:       ptr.To(false),
  4646  							Serving:     ptr.To(false),
  4647  							Terminating: ptr.To(true),
  4648  						},
  4649  						NodeName: ptr.To(testHostname),
  4650  					},
  4651  					{
  4652  						// Remote and not ready or serving
  4653  						Addresses: []string{"10.0.1.5"},
  4654  						Conditions: discovery.EndpointConditions{
  4655  							Ready:       ptr.To(false),
  4656  							Serving:     ptr.To(false),
  4657  							Terminating: ptr.To(true),
  4658  						},
  4659  						NodeName: ptr.To("host-1"),
  4660  					},
  4661  				},
  4662  			},
  4663  			flowTests: []packetFlowTest{
  4664  				{
  4665  					name:     "pod to clusterIP, no usable endpoints",
  4666  					sourceIP: "10.0.0.2",
  4667  					destIP:   "172.30.1.1",
  4668  					destPort: 80,
  4669  					output:   "REJECT",
  4670  				},
  4671  				{
  4672  					name:     "external to LB, no usable endpoints",
  4673  					sourceIP: testExternalClient,
  4674  					destIP:   "1.2.3.4",
  4675  					destPort: 80,
  4676  					output:   "REJECT",
  4677  				},
  4678  			},
  4679  		},
  4680  	}
  4681  
  4682  	for _, testcase := range testcases {
  4683  		t.Run(testcase.name, func(t *testing.T) {
  4684  			ipt := iptablestest.NewFake()
  4685  			fp := NewFakeProxier(ipt)
  4686  			fp.OnServiceSynced()
  4687  			fp.OnEndpointSlicesSynced()
  4688  
  4689  			fp.OnServiceAdd(service)
  4690  
  4691  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  4692  			fp.syncProxyRules()
  4693  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  4694  
  4695  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  4696  			fp.syncProxyRules()
  4697  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  4698  				{
  4699  					name:     "pod to clusterIP after endpoints deleted",
  4700  					sourceIP: "10.0.0.2",
  4701  					destIP:   "172.30.1.1",
  4702  					destPort: 80,
  4703  					output:   "REJECT",
  4704  				},
  4705  				{
  4706  					name:     "external to LB after endpoints deleted",
  4707  					sourceIP: testExternalClient,
  4708  					destIP:   "1.2.3.4",
  4709  					destPort: 80,
  4710  					output:   "REJECT",
  4711  				},
  4712  			})
  4713  		})
  4714  	}
  4715  }
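
// selectUsableEndpoints is a hedged, self-contained sketch (not the
// proxier's actual implementation) of the selection rule the cases above
// exercise for a given scope (local endpoints for an
// ExternalTrafficPolicy=Local load balancer): use ready endpoints when any
// exist, otherwise fall back to endpoints that are still serving while
// terminating.
func selectUsableEndpoints(eps []discovery.Endpoint) []discovery.Endpoint {
	var ready, servingTerminating []discovery.Endpoint
	for _, ep := range eps {
		switch {
		case ep.Conditions.Ready != nil && *ep.Conditions.Ready:
			ready = append(ready, ep)
		case ep.Conditions.Serving != nil && *ep.Conditions.Serving:
			// Not ready, but still serving, i.e. terminating gracefully.
			servingTerminating = append(servingTerminating, ep)
		}
	}
	if len(ready) > 0 {
		return ready
	}
	return servingTerminating
}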
  4716  
  4717  // TestTerminatingEndpointsTrafficPolicyCluster tests that when a service has both ready
  4718  // endpoints and serving-but-terminating endpoints cluster-wide, only the ready endpoints are used.
  4719  func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
  4720  	service := &v1.Service{
  4721  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  4722  		Spec: v1.ServiceSpec{
  4723  			ClusterIP:             "172.30.1.1",
  4724  			Type:                  v1.ServiceTypeLoadBalancer,
  4725  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster,
  4726  			Ports: []v1.ServicePort{
  4727  				{
  4728  					Name:       "",
  4729  					TargetPort: intstr.FromInt32(80),
  4730  					Port:       80,
  4731  					Protocol:   v1.ProtocolTCP,
  4732  				},
  4733  			},
  4734  			HealthCheckNodePort: 30000,
  4735  		},
  4736  		Status: v1.ServiceStatus{
  4737  			LoadBalancer: v1.LoadBalancerStatus{
  4738  				Ingress: []v1.LoadBalancerIngress{
  4739  					{IP: "1.2.3.4"},
  4740  				},
  4741  			},
  4742  		},
  4743  	}
  4744  
  4745  	testcases := []struct {
  4746  		name          string
  4747  		line          int
  4748  		endpointslice *discovery.EndpointSlice
  4749  		flowTests     []packetFlowTest
  4750  	}{
  4751  		{
  4752  			name: "ready endpoints exist",
  4753  			line: getLine(),
  4754  			endpointslice: &discovery.EndpointSlice{
  4755  				ObjectMeta: metav1.ObjectMeta{
  4756  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4757  					Namespace: "ns1",
  4758  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4759  				},
  4760  				Ports: []discovery.EndpointPort{{
  4761  					Name:     ptr.To(""),
  4762  					Port:     ptr.To[int32](80),
  4763  					Protocol: ptr.To(v1.ProtocolTCP),
  4764  				}},
  4765  				AddressType: discovery.AddressTypeIPv4,
  4766  				Endpoints: []discovery.Endpoint{
  4767  					{
  4768  						Addresses: []string{"10.0.1.1"},
  4769  						Conditions: discovery.EndpointConditions{
  4770  							Ready:       ptr.To(true),
  4771  							Serving:     ptr.To(true),
  4772  							Terminating: ptr.To(false),
  4773  						},
  4774  						NodeName: ptr.To(testHostname),
  4775  					},
  4776  					{
  4777  						Addresses: []string{"10.0.1.2"},
  4778  						Conditions: discovery.EndpointConditions{
  4779  							Ready:       ptr.To(true),
  4780  							Serving:     ptr.To(true),
  4781  							Terminating: ptr.To(false),
  4782  						},
  4783  						NodeName: ptr.To(testHostname),
  4784  					},
  4785  					{
  4786  						// this endpoint should be ignored since there are ready non-terminating endpoints
  4787  						Addresses: []string{"10.0.1.3"},
  4788  						Conditions: discovery.EndpointConditions{
  4789  							Ready:       ptr.To(false),
  4790  							Serving:     ptr.To(true),
  4791  							Terminating: ptr.To(true),
  4792  						},
  4793  						NodeName: ptr.To("another-host"),
  4794  					},
  4795  					{
  4796  						// this endpoint should be ignored since it is not "serving"
  4797  						Addresses: []string{"10.0.1.4"},
  4798  						Conditions: discovery.EndpointConditions{
  4799  							Ready:       ptr.To(false),
  4800  							Serving:     ptr.To(false),
  4801  							Terminating: ptr.To(true),
  4802  						},
  4803  						NodeName: ptr.To("another-host"),
  4804  					},
  4805  					{
  4806  						Addresses: []string{"10.0.1.5"},
  4807  						Conditions: discovery.EndpointConditions{
  4808  							Ready:       ptr.To(true),
  4809  							Serving:     ptr.To(true),
  4810  							Terminating: ptr.To(false),
  4811  						},
  4812  						NodeName: ptr.To("another-host"),
  4813  					},
  4814  				},
  4815  			},
  4816  			flowTests: []packetFlowTest{
  4817  				{
  4818  					name:     "pod to clusterIP",
  4819  					sourceIP: "10.0.0.2",
  4820  					destIP:   "172.30.1.1",
  4821  					destPort: 80,
  4822  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4823  					masq:     false,
  4824  				},
  4825  				{
  4826  					name:     "external to LB",
  4827  					sourceIP: testExternalClient,
  4828  					destIP:   "1.2.3.4",
  4829  					destPort: 80,
  4830  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4831  					masq:     true,
  4832  				},
  4833  			},
  4834  		},
  4835  		{
  4836  			name: "only terminating endpoints exist",
  4837  			line: getLine(),
  4838  			endpointslice: &discovery.EndpointSlice{
  4839  				ObjectMeta: metav1.ObjectMeta{
  4840  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4841  					Namespace: "ns1",
  4842  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4843  				},
  4844  				Ports: []discovery.EndpointPort{{
  4845  					Name:     ptr.To(""),
  4846  					Port:     ptr.To[int32](80),
  4847  					Protocol: ptr.To(v1.ProtocolTCP),
  4848  				}},
  4849  				AddressType: discovery.AddressTypeIPv4,
  4850  				Endpoints: []discovery.Endpoint{
  4851  					{
  4852  						// this endpoint should be used since there are only ready terminating endpoints
  4853  						Addresses: []string{"10.0.1.2"},
  4854  						Conditions: discovery.EndpointConditions{
  4855  							Ready:       ptr.To(false),
  4856  							Serving:     ptr.To(true),
  4857  							Terminating: ptr.To(true),
  4858  						},
  4859  						NodeName: ptr.To(testHostname),
  4860  					},
  4861  					{
  4862  						// this endpoint should be used since there are only ready terminating endpoints
  4863  						Addresses: []string{"10.0.1.3"},
  4864  						Conditions: discovery.EndpointConditions{
  4865  							Ready:       ptr.To(false),
  4866  							Serving:     ptr.To(true),
  4867  							Terminating: ptr.To(true),
  4868  						},
  4869  						NodeName: ptr.To(testHostname),
  4870  					},
  4871  					{
  4872  						// this endpoint should not be used since it is terminating and no longer serving.
  4873  						Addresses: []string{"10.0.1.4"},
  4874  						Conditions: discovery.EndpointConditions{
  4875  							Ready:       ptr.To(false),
  4876  							Serving:     ptr.To(false),
  4877  							Terminating: ptr.To(true),
  4878  						},
  4879  						NodeName: ptr.To("another-host"),
  4880  					},
  4881  					{
  4882  						// this endpoint should be used since there are only ready terminating endpoints
  4883  						Addresses: []string{"10.0.1.5"},
  4884  						Conditions: discovery.EndpointConditions{
  4885  							Ready:       ptr.To(false),
  4886  							Serving:     ptr.To(true),
  4887  							Terminating: ptr.To(true),
  4888  						},
  4889  						NodeName: ptr.To("another-host"),
  4890  					},
  4891  				},
  4892  			},
  4893  			flowTests: []packetFlowTest{
  4894  				{
  4895  					name:     "pod to clusterIP",
  4896  					sourceIP: "10.0.0.2",
  4897  					destIP:   "172.30.1.1",
  4898  					destPort: 80,
  4899  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  4900  					masq:     false,
  4901  				},
  4902  				{
  4903  					name:     "external to LB",
  4904  					sourceIP: testExternalClient,
  4905  					destIP:   "1.2.3.4",
  4906  					destPort: 80,
  4907  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  4908  					masq:     true,
  4909  				},
  4910  			},
  4911  		},
  4912  		{
  4913  			name: "terminating endpoints on remote node",
  4914  			line: getLine(),
  4915  			endpointslice: &discovery.EndpointSlice{
  4916  				ObjectMeta: metav1.ObjectMeta{
  4917  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4918  					Namespace: "ns1",
  4919  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4920  				},
  4921  				Ports: []discovery.EndpointPort{{
  4922  					Name:     ptr.To(""),
  4923  					Port:     ptr.To[int32](80),
  4924  					Protocol: ptr.To(v1.ProtocolTCP),
  4925  				}},
  4926  				AddressType: discovery.AddressTypeIPv4,
  4927  				Endpoints: []discovery.Endpoint{
  4928  					{
  4929  						Addresses: []string{"10.0.1.5"},
  4930  						Conditions: discovery.EndpointConditions{
  4931  							Ready:       ptr.To(false),
  4932  							Serving:     ptr.To(true),
  4933  							Terminating: ptr.To(true),
  4934  						},
  4935  						NodeName: ptr.To("host-1"),
  4936  					},
  4937  				},
  4938  			},
  4939  			flowTests: []packetFlowTest{
  4940  				{
  4941  					name:     "pod to clusterIP",
  4942  					sourceIP: "10.0.0.2",
  4943  					destIP:   "172.30.1.1",
  4944  					destPort: 80,
  4945  					output:   "10.0.1.5:80",
  4946  					masq:     false,
  4947  				},
  4948  				{
  4949  					name:     "external to LB",
  4950  					sourceIP: testExternalClient,
  4951  					destIP:   "1.2.3.4",
  4952  					destPort: 80,
  4953  					output:   "10.0.1.5:80",
  4954  					masq:     true,
  4955  				},
  4956  			},
  4957  		},
  4958  		{
  4959  			name: "no usable endpoints on any node",
  4960  			line: getLine(),
  4961  			endpointslice: &discovery.EndpointSlice{
  4962  				ObjectMeta: metav1.ObjectMeta{
  4963  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4964  					Namespace: "ns1",
  4965  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4966  				},
  4967  				Ports: []discovery.EndpointPort{{
  4968  					Name:     ptr.To(""),
  4969  					Port:     ptr.To[int32](80),
  4970  					Protocol: ptr.To(v1.ProtocolTCP),
  4971  				}},
  4972  				AddressType: discovery.AddressTypeIPv4,
  4973  				Endpoints: []discovery.Endpoint{
  4974  					{
  4975  						// Local, not ready or serving
  4976  						Addresses: []string{"10.0.1.5"},
  4977  						Conditions: discovery.EndpointConditions{
  4978  							Ready:       ptr.To(false),
  4979  							Serving:     ptr.To(false),
  4980  							Terminating: ptr.To(true),
  4981  						},
  4982  						NodeName: ptr.To(testHostname),
  4983  					},
  4984  					{
  4985  						// Remote, not ready or serving
  4986  						Addresses: []string{"10.0.1.5"},
  4987  						Conditions: discovery.EndpointConditions{
  4988  							Ready:       ptr.To(false),
  4989  							Serving:     ptr.To(false),
  4990  							Terminating: ptr.To(true),
  4991  						},
  4992  						NodeName: ptr.To("host-1"),
  4993  					},
  4994  				},
  4995  			},
  4996  			flowTests: []packetFlowTest{
  4997  				{
  4998  					name:     "pod to clusterIP",
  4999  					sourceIP: "10.0.0.2",
  5000  					destIP:   "172.30.1.1",
  5001  					destPort: 80,
  5002  					output:   "REJECT",
  5003  				},
  5004  				{
  5005  					name:     "external to LB",
  5006  					sourceIP: testExternalClient,
  5007  					destIP:   "1.2.3.4",
  5008  					destPort: 80,
  5009  					output:   "REJECT",
  5010  				},
  5011  			},
  5012  		},
  5013  	}
  5014  
  5015  	for _, testcase := range testcases {
  5016  		t.Run(testcase.name, func(t *testing.T) {
  5017  
  5018  			ipt := iptablestest.NewFake()
  5019  			fp := NewFakeProxier(ipt)
  5020  			fp.OnServiceSynced()
  5021  			fp.OnEndpointSlicesSynced()
  5022  
  5023  			fp.OnServiceAdd(service)
  5024  
  5025  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  5026  			fp.syncProxyRules()
  5027  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  5028  
  5029  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  5030  			fp.syncProxyRules()
  5031  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  5032  				{
  5033  					name:     "pod to clusterIP after endpoints deleted",
  5034  					sourceIP: "10.0.0.2",
  5035  					destIP:   "172.30.1.1",
  5036  					destPort: 80,
  5037  					output:   "REJECT",
  5038  				},
  5039  				{
  5040  					name:     "external to LB after endpoints deleted",
  5041  					sourceIP: testExternalClient,
  5042  					destIP:   "1.2.3.4",
  5043  					destPort: 80,
  5044  					output:   "REJECT",
  5045  				},
  5046  			})
  5047  		})
  5048  	}
  5049  }
  5050  
  5051  func TestInternalExternalMasquerade(t *testing.T) {
  5052  	// (Put the test setup code in an internal function so we can have it here at the
  5053  	// top, before the test cases that will be run against it.)
  5054  	setupTest := func(fp *Proxier) {
  5055  		makeServiceMap(fp,
  5056  			makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5057  				svc.Spec.Type = "LoadBalancer"
  5058  				svc.Spec.ClusterIP = "172.30.0.41"
  5059  				svc.Spec.Ports = []v1.ServicePort{{
  5060  					Name:     "p80",
  5061  					Port:     80,
  5062  					Protocol: v1.ProtocolTCP,
  5063  					NodePort: int32(3001),
  5064  				}}
  5065  				svc.Spec.HealthCheckNodePort = 30001
  5066  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5067  					IP: "1.2.3.4",
  5068  				}}
  5069  			}),
  5070  			makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5071  				svc.Spec.Type = "LoadBalancer"
  5072  				svc.Spec.ClusterIP = "172.30.0.42"
  5073  				svc.Spec.Ports = []v1.ServicePort{{
  5074  					Name:     "p80",
  5075  					Port:     80,
  5076  					Protocol: v1.ProtocolTCP,
  5077  					NodePort: int32(3002),
  5078  				}}
  5079  				svc.Spec.HealthCheckNodePort = 30002
  5080  				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  5081  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5082  					IP: "5.6.7.8",
  5083  				}}
  5084  			}),
  5085  			makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5086  				svc.Spec.Type = "LoadBalancer"
  5087  				svc.Spec.ClusterIP = "172.30.0.43"
  5088  				svc.Spec.Ports = []v1.ServicePort{{
  5089  					Name:     "p80",
  5090  					Port:     80,
  5091  					Protocol: v1.ProtocolTCP,
  5092  					NodePort: int32(3003),
  5093  				}}
  5094  				svc.Spec.HealthCheckNodePort = 30003
  5095  				svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal)
  5096  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5097  					IP: "9.10.11.12",
  5098  				}}
  5099  			}),
  5100  		)
  5101  
  5102  		populateEndpointSlices(fp,
  5103  			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5104  				eps.AddressType = discovery.AddressTypeIPv4
  5105  				eps.Endpoints = []discovery.Endpoint{
  5106  					{
  5107  						Addresses: []string{"10.180.0.1"},
  5108  						NodeName:  ptr.To(testHostname),
  5109  					},
  5110  					{
  5111  						Addresses: []string{"10.180.1.1"},
  5112  						NodeName:  ptr.To("remote"),
  5113  					},
  5114  				}
  5115  				eps.Ports = []discovery.EndpointPort{{
  5116  					Name:     ptr.To("p80"),
  5117  					Port:     ptr.To[int32](80),
  5118  					Protocol: ptr.To(v1.ProtocolTCP),
  5119  				}}
  5120  			}),
  5121  			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5122  				eps.AddressType = discovery.AddressTypeIPv4
  5123  				eps.Endpoints = []discovery.Endpoint{
  5124  					{
  5125  						Addresses: []string{"10.180.0.2"},
  5126  						NodeName:  ptr.To(testHostname),
  5127  					},
  5128  					{
  5129  						Addresses: []string{"10.180.1.2"},
  5130  						NodeName:  ptr.To("remote"),
  5131  					},
  5132  				}
  5133  				eps.Ports = []discovery.EndpointPort{{
  5134  					Name:     ptr.To("p80"),
  5135  					Port:     ptr.To[int32](80),
  5136  					Protocol: ptr.To(v1.ProtocolTCP),
  5137  				}}
  5138  			}),
  5139  			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5140  				eps.AddressType = discovery.AddressTypeIPv4
  5141  				eps.Endpoints = []discovery.Endpoint{
  5142  					{
  5143  						Addresses: []string{"10.180.0.3"},
  5144  						NodeName:  ptr.To(testHostname),
  5145  					},
  5146  					{
  5147  						Addresses: []string{"10.180.1.3"},
  5148  						NodeName:  ptr.To("remote"),
  5149  					},
  5150  				}
  5151  				eps.Ports = []discovery.EndpointPort{{
  5152  					Name:     ptr.To("p80"),
  5153  					Port:     ptr.To[int32](80),
  5154  					Protocol: ptr.To(v1.ProtocolTCP),
  5155  				}}
  5156  			}),
  5157  		)
  5158  
  5159  		fp.syncProxyRules()
  5160  	}
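
        	// For quick reference in the flowTests below, setupTest creates:
        	//
        	//   svc1: ClusterIP 172.30.0.41, NodePort 3001, LB 1.2.3.4,    default traffic policies
        	//   svc2: ClusterIP 172.30.0.42, NodePort 3002, LB 5.6.7.8,    externalTrafficPolicy: Local
        	//   svc3: ClusterIP 172.30.0.43, NodePort 3003, LB 9.10.11.12, internalTrafficPolicy: Local
        	//
        	// each with one local endpoint (10.180.0.x) and one remote endpoint (10.180.1.x).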
  5161  
  5162  	// We use the same flowTests for all of the testCases. The "output" and "masq"
  5163  	// values here represent the normal case (working localDetector, no masqueradeAll)
  5164  	flowTests := []packetFlowTest{
  5165  		{
  5166  			name:     "pod to ClusterIP",
  5167  			sourceIP: "10.0.0.2",
  5168  			destIP:   "172.30.0.41",
  5169  			destPort: 80,
  5170  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5171  			masq:     false,
  5172  		},
  5173  		{
  5174  			name:     "pod to NodePort",
  5175  			sourceIP: "10.0.0.2",
  5176  			destIP:   testNodeIP,
  5177  			destPort: 3001,
  5178  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5179  			masq:     true,
  5180  		},
  5181  		{
  5182  			name:     "pod to LB",
  5183  			sourceIP: "10.0.0.2",
  5184  			destIP:   "1.2.3.4",
  5185  			destPort: 80,
  5186  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5187  			masq:     true,
  5188  		},
  5189  		{
  5190  			name:     "node to ClusterIP",
  5191  			sourceIP: testNodeIP,
  5192  			destIP:   "172.30.0.41",
  5193  			destPort: 80,
  5194  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5195  			masq:     true,
  5196  		},
  5197  		{
  5198  			name:     "node to NodePort",
  5199  			sourceIP: testNodeIP,
  5200  			destIP:   testNodeIP,
  5201  			destPort: 3001,
  5202  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5203  			masq:     true,
  5204  		},
  5205  		{
  5206  			name:     "localhost to NodePort",
  5207  			sourceIP: "127.0.0.1",
  5208  			destIP:   "127.0.0.1",
  5209  			destPort: 3001,
  5210  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5211  			masq:     true,
  5212  		},
  5213  		{
  5214  			name:     "node to LB",
  5215  			sourceIP: testNodeIP,
  5216  			destIP:   "1.2.3.4",
  5217  			destPort: 80,
  5218  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5219  			masq:     true,
  5220  		},
  5221  		{
  5222  			name:     "external to ClusterIP",
  5223  			sourceIP: testExternalClient,
  5224  			destIP:   "172.30.0.41",
  5225  			destPort: 80,
  5226  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5227  			masq:     true,
  5228  		},
  5229  		{
  5230  			name:     "external to NodePort",
  5231  			sourceIP: testExternalClient,
  5232  			destIP:   testNodeIP,
  5233  			destPort: 3001,
  5234  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5235  			masq:     true,
  5236  		},
  5237  		{
  5238  			name:     "external to LB",
  5239  			sourceIP: testExternalClient,
  5240  			destIP:   "1.2.3.4",
  5241  			destPort: 80,
  5242  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5243  			masq:     true,
  5244  		},
  5245  		{
  5246  			name:     "pod to ClusterIP with eTP:Local",
  5247  			sourceIP: "10.0.0.2",
  5248  			destIP:   "172.30.0.42",
  5249  			destPort: 80,
  5250  
  5251  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5252  			// as "Pod to ClusterIP"
  5253  			output: "10.180.0.2:80, 10.180.1.2:80",
  5254  			masq:   false,
  5255  		},
  5256  		{
  5257  			name:     "pod to NodePort with eTP:Local",
  5258  			sourceIP: "10.0.0.2",
  5259  			destIP:   testNodeIP,
  5260  			destPort: 3002,
  5261  
  5262  			// See the comment below in the "pod to LB with eTP:Local" case.
  5263  			// It doesn't actually make sense to short-circuit here, since if
  5264  			// you connect directly to a NodePort from outside the cluster,
  5265  			// you only get the local endpoints. But it's simpler for us and
  5266  			// slightly more convenient for users to have this case get
  5267  			// short-circuited too.
  5268  			output: "10.180.0.2:80, 10.180.1.2:80",
  5269  			masq:   false,
  5270  		},
  5271  		{
  5272  			name:     "pod to LB with eTP:Local",
  5273  			sourceIP: "10.0.0.2",
  5274  			destIP:   "5.6.7.8",
  5275  			destPort: 80,
  5276  
  5277  			// The short-circuit rule is supposed to make this behave the same
  5278  			// way it would if the packet actually went out to the LB and then
  5279  			// came back into the cluster. So it gets routed to all endpoints,
  5280  			// not just local ones. In reality, if the packet actually left
  5281  			// the cluster, it would have to get masqueraded, but since we can
  5282  			// avoid doing that in the short-circuit case, and not masquerading
  5283  			// is more useful, we avoid masquerading.
  5284  			output: "10.180.0.2:80, 10.180.1.2:80",
  5285  			masq:   false,
  5286  		},
  5287  		{
  5288  			name:     "node to ClusterIP with eTP:Local",
  5289  			sourceIP: testNodeIP,
  5290  			destIP:   "172.30.0.42",
  5291  			destPort: 80,
  5292  
  5293  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5294  			// as "node to ClusterIP"
  5295  			output: "10.180.0.2:80, 10.180.1.2:80",
  5296  			masq:   true,
  5297  		},
  5298  		{
  5299  			name:     "node to NodePort with eTP:Local",
  5300  			sourceIP: testNodeIP,
  5301  			destIP:   testNodeIP,
  5302  			destPort: 3002,
  5303  
  5304  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5305  			// same as "node to NodePort" above, but with svc2's endpoints.
  5306  			output: "10.180.0.2:80, 10.180.1.2:80",
  5307  			masq:   true,
  5308  		},
  5309  		{
  5310  			name:     "localhost to NodePort with eTP:Local",
  5311  			sourceIP: "127.0.0.1",
  5312  			destIP:   "127.0.0.1",
  5313  			destPort: 3002,
  5314  
  5315  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5316  			// same as "localhost to NodePort" above.
  5317  			output: "10.180.0.2:80, 10.180.1.2:80",
  5318  			masq:   true,
  5319  		},
  5320  		{
  5321  			name:     "node to LB with eTP:Local",
  5322  			sourceIP: testNodeIP,
  5323  			destIP:   "5.6.7.8",
  5324  			destPort: 80,
  5325  
  5326  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5327  			// same as "node to LB" above.
  5328  			output: "10.180.0.2:80, 10.180.1.2:80",
  5329  			masq:   true,
  5330  		},
  5331  		{
  5332  			name:     "external to ClusterIP with eTP:Local",
  5333  			sourceIP: testExternalClient,
  5334  			destIP:   "172.30.0.42",
  5335  			destPort: 80,
  5336  
  5337  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5338  			// as "external to ClusterIP" above.
  5339  			output: "10.180.0.2:80, 10.180.1.2:80",
  5340  			masq:   true,
  5341  		},
  5342  		{
  5343  			name:     "external to NodePort with eTP:Local",
  5344  			sourceIP: testExternalClient,
  5345  			destIP:   testNodeIP,
  5346  			destPort: 3002,
  5347  
  5348  			// externalTrafficPolicy applies; only the local endpoint is
  5349  			// selected, and we don't masquerade.
  5350  			output: "10.180.0.2:80",
  5351  			masq:   false,
  5352  		},
  5353  		{
  5354  			name:     "external to LB with eTP:Local",
  5355  			sourceIP: testExternalClient,
  5356  			destIP:   "5.6.7.8",
  5357  			destPort: 80,
  5358  
  5359  			// externalTrafficPolicy applies; only the local endpoint is
  5360  			// selected, and we don't masquerade.
  5361  			output: "10.180.0.2:80",
  5362  			masq:   false,
  5363  		},
  5364  		{
  5365  			name:     "pod to ClusterIP with iTP:Local",
  5366  			sourceIP: "10.0.0.2",
  5367  			destIP:   "172.30.0.43",
  5368  			destPort: 80,
  5369  
  5370  			// internalTrafficPolicy applies; only the local endpoint is
  5371  			// selected.
  5372  			output: "10.180.0.3:80",
  5373  			masq:   false,
  5374  		},
  5375  		{
  5376  			name:     "pod to NodePort with iTP:Local",
  5377  			sourceIP: "10.0.0.2",
  5378  			destIP:   testNodeIP,
  5379  			destPort: 3003,
  5380  
  5381  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5382  			// "pod to NodePort" above.
  5383  			output: "10.180.0.3:80, 10.180.1.3:80",
  5384  			masq:   true,
  5385  		},
  5386  		{
  5387  			name:     "pod to LB with iTP:Local",
  5388  			sourceIP: "10.0.0.2",
  5389  			destIP:   "9.10.11.12",
  5390  			destPort: 80,
  5391  
  5392  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5393  			// same as "pod to LB" above.
  5394  			output: "10.180.0.3:80, 10.180.1.3:80",
  5395  			masq:   true,
  5396  		},
  5397  		{
  5398  			name:     "node to ClusterIP with iTP:Local",
  5399  			sourceIP: testNodeIP,
  5400  			destIP:   "172.30.0.43",
  5401  			destPort: 80,
  5402  
  5403  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5404  			// Traffic is masqueraded as in the "node to ClusterIP" case because
  5405  			// internalTrafficPolicy does not affect masquerading.
  5406  			output: "10.180.0.3:80",
  5407  			masq:   true,
  5408  		},
  5409  		{
  5410  			name:     "node to NodePort with iTP:Local",
  5411  			sourceIP: testNodeIP,
  5412  			destIP:   testNodeIP,
  5413  			destPort: 3003,
  5414  
  5415  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5416  			// "node to NodePort" above.
  5417  			output: "10.180.0.3:80, 10.180.1.3:80",
  5418  			masq:   true,
  5419  		},
  5420  		{
  5421  			name:     "localhost to NodePort with iTP:Local",
  5422  			sourceIP: "127.0.0.1",
  5423  			destIP:   "127.0.0.1",
  5424  			destPort: 3003,
  5425  
  5426  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5427  			// "localhost to NodePort" above.
  5428  			output: "10.180.0.3:80, 10.180.1.3:80",
  5429  			masq:   true,
  5430  		},
  5431  		{
  5432  			name:     "node to LB with iTP:Local",
  5433  			sourceIP: testNodeIP,
  5434  			destIP:   "9.10.11.12",
  5435  			destPort: 80,
  5436  
  5437  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5438  			// same as "node to LB" above.
  5439  			output: "10.180.0.3:80, 10.180.1.3:80",
  5440  			masq:   true,
  5441  		},
  5442  		{
  5443  			name:     "external to ClusterIP with iTP:Local",
  5444  			sourceIP: testExternalClient,
  5445  			destIP:   "172.30.0.43",
  5446  			destPort: 80,
  5447  
  5448  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5449  			// Traffic is masqueraded as in the "external to ClusterIP" case
  5450  			// because internalTrafficPolicy does not affect masquerading.
  5451  			output: "10.180.0.3:80",
  5452  			masq:   true,
  5453  		},
  5454  		{
  5455  			name:     "external to NodePort with iTP:Local",
  5456  			sourceIP: testExternalClient,
  5457  			destIP:   testNodeIP,
  5458  			destPort: 3003,
  5459  
  5460  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5461  			// "external to NodePort" above.
  5462  			output: "10.180.0.3:80, 10.180.1.3:80",
  5463  			masq:   true,
  5464  		},
  5465  		{
  5466  			name:     "external to LB with iTP:Local",
  5467  			sourceIP: testExternalClient,
  5468  			destIP:   "9.10.11.12",
  5469  			destPort: 80,
  5470  
  5471  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5472  			// same as "external to LB" above.
  5473  			output: "10.180.0.3:80, 10.180.1.3:80",
  5474  			masq:   true,
  5475  		},
  5476  	}
  5477  
  5478  	type packetFlowTestOverride struct {
  5479  		output *string
  5480  		masq   *bool
  5481  	}
  5482  
  5483  	testCases := []struct {
  5484  		name          string
  5485  		line          int
  5486  		masqueradeAll bool
  5487  		localDetector bool
  5488  		overrides     map[string]packetFlowTestOverride
  5489  	}{
  5490  		{
  5491  			name:          "base",
  5492  			line:          getLine(),
  5493  			masqueradeAll: false,
  5494  			localDetector: true,
  5495  			overrides:     nil,
  5496  		},
  5497  		{
  5498  			name:          "no LocalTrafficDetector",
  5499  			line:          getLine(),
  5500  			masqueradeAll: false,
  5501  			localDetector: false,
  5502  			overrides: map[string]packetFlowTestOverride{
  5503  				// With no LocalTrafficDetector, all traffic to a
  5504  				// ClusterIP is assumed to be from a pod, and thus to not
  5505  				// require masquerading.
  5506  				"node to ClusterIP": {
  5507  					masq: ptr.To(false),
  5508  				},
  5509  				"node to ClusterIP with eTP:Local": {
  5510  					masq: ptr.To(false),
  5511  				},
  5512  				"node to ClusterIP with iTP:Local": {
  5513  					masq: ptr.To(false),
  5514  				},
  5515  				"external to ClusterIP": {
  5516  					masq: ptr.To(false),
  5517  				},
  5518  				"external to ClusterIP with eTP:Local": {
  5519  					masq: ptr.To(false),
  5520  				},
  5521  				"external to ClusterIP with iTP:Local": {
  5522  					masq: ptr.To(false),
  5523  				},
  5524  
  5525  				// And there's no eTP:Local short-circuit for pod traffic,
  5526  				// so pods get only the local endpoints.
  5527  				"pod to NodePort with eTP:Local": {
  5528  					output: ptr.To("10.180.0.2:80"),
  5529  				},
  5530  				"pod to LB with eTP:Local": {
  5531  					output: ptr.To("10.180.0.2:80"),
  5532  				},
  5533  			},
  5534  		},
  5535  		{
  5536  			name:          "masqueradeAll",
  5537  			line:          getLine(),
  5538  			masqueradeAll: true,
  5539  			localDetector: true,
  5540  			overrides: map[string]packetFlowTestOverride{
  5541  				// All "to ClusterIP" traffic gets masqueraded when using
  5542  				// --masquerade-all.
  5543  				"pod to ClusterIP": {
  5544  					masq: ptr.To(true),
  5545  				},
  5546  				"pod to ClusterIP with eTP:Local": {
  5547  					masq: ptr.To(true),
  5548  				},
  5549  				"pod to ClusterIP with iTP:Local": {
  5550  					masq: ptr.To(true),
  5551  				},
  5552  			},
  5553  		},
  5554  		{
  5555  			name:          "masqueradeAll, no LocalTrafficDetector",
  5556  			line:          getLine(),
  5557  			masqueradeAll: true,
  5558  			localDetector: false,
  5559  			overrides: map[string]packetFlowTestOverride{
  5560  				// As in "masqueradeAll"
  5561  				"pod to ClusterIP": {
  5562  					masq: ptr.To(true),
  5563  				},
  5564  				"pod to ClusterIP with eTP:Local": {
  5565  					masq: ptr.To(true),
  5566  				},
  5567  				"pod to ClusterIP with iTP:Local": {
  5568  					masq: ptr.To(true),
  5569  				},
  5570  
  5571  				// As in "no LocalTrafficDetector"
  5572  				"pod to NodePort with eTP:Local": {
  5573  					output: ptr.To("10.180.0.2:80"),
  5574  				},
  5575  				"pod to LB with eTP:Local": {
  5576  					output: ptr.To("10.180.0.2:80"),
  5577  				},
  5578  			},
  5579  		},
  5580  	}
  5581  
  5582  	for _, tc := range testCases {
  5583  		t.Run(tc.name, func(t *testing.T) {
  5584  			ipt := iptablestest.NewFake()
  5585  			fp := NewFakeProxier(ipt)
  5586  			fp.masqueradeAll = tc.masqueradeAll
  5587  			if !tc.localDetector {
  5588  				fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
  5589  			}
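        			// (With the no-op detector, the proxier can no longer distinguish pod
        			// traffic from other traffic, which is what the "no LocalTrafficDetector"
        			// overrides above account for.)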
  5590  			setupTest(fp)
  5591  
  5592  			// Merge base flowTests with per-test-case overrides
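        			// Each override must actually change the base value, and every override
        			// key must match a flow test name; the checks below catch overrides that
        			// have gone stale relative to flowTests.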
  5593  			tcFlowTests := make([]packetFlowTest, len(flowTests))
  5594  			overridesApplied := 0
  5595  			for i := range flowTests {
  5596  				tcFlowTests[i] = flowTests[i]
  5597  				if overrides, set := tc.overrides[flowTests[i].name]; set {
  5598  					overridesApplied++
  5599  					if overrides.masq != nil {
  5600  						if tcFlowTests[i].masq == *overrides.masq {
  5601  							t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
  5602  						}
  5603  						tcFlowTests[i].masq = *overrides.masq
  5604  					}
  5605  					if overrides.output != nil {
  5606  						if tcFlowTests[i].output == *overrides.output {
  5607  							t.Errorf("%q override value for output is same as base value", flowTests[i].name)
  5608  						}
  5609  						tcFlowTests[i].output = *overrides.output
  5610  					}
  5611  				}
  5612  			}
  5613  			if overridesApplied != len(tc.overrides) {
  5614  				t.Errorf("%d overrides did not match any flow test name!", len(tc.overrides)-overridesApplied)
  5615  			}
  5616  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tcFlowTests)
  5617  		})
  5618  	}
  5619  }
  5620  
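        // countEndpointsAndComments scans a dump of iptables data for KUBE-SEP DNAT
        // rules, returning the rule that matches matchEndpoint (the last one, if
        // several), the total number of endpoint DNAT rules, and how many of those
        // carry "--comment" annotations. A minimal usage sketch, with hypothetical data:
        //
        //	matched, eps, comments := countEndpointsAndComments(
        //		"-A KUBE-SEP-XXXXXXXXXXXXXXXX -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80\n",
        //		"10.0.1.1")
        //	// matched holds the full rule; eps == 1; comments == 1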
  5621  func countEndpointsAndComments(iptablesData string, matchEndpoint string) (string, int, int) {
  5622  	var numEndpoints, numComments int
  5623  	var matched string
  5624  	for _, line := range strings.Split(iptablesData, "\n") {
  5625  		if strings.HasPrefix(line, "-A KUBE-SEP-") && strings.Contains(line, "-j DNAT") {
  5626  			numEndpoints++
  5627  			if strings.Contains(line, "--comment") {
  5628  				numComments++
  5629  			}
  5630  			if strings.Contains(line, matchEndpoint) {
  5631  				matched = line
  5632  			}
  5633  		}
  5634  	}
  5635  	return matched, numEndpoints, numComments
  5636  }
  5637  
  5638  func TestSyncProxyRulesLargeClusterMode(t *testing.T) {
  5639  	ipt := iptablestest.NewFake()
  5640  	fp := NewFakeProxier(ipt)
  5641  	fp.masqueradeAll = true
  5642  	fp.syncPeriod = 30 * time.Second
  5643  
  5644  	makeServiceMap(fp,
  5645  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5646  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5647  			svc.Spec.ClusterIP = "172.30.0.41"
  5648  			svc.Spec.Ports = []v1.ServicePort{{
  5649  				Name:     "p80",
  5650  				Port:     80,
  5651  				Protocol: v1.ProtocolTCP,
  5652  			}}
  5653  		}),
  5654  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5655  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5656  			svc.Spec.ClusterIP = "172.30.0.42"
  5657  			svc.Spec.Ports = []v1.ServicePort{{
  5658  				Name:     "p8080",
  5659  				Port:     8080,
  5660  				Protocol: v1.ProtocolTCP,
  5661  			}}
  5662  		}),
  5663  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5664  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5665  			svc.Spec.ClusterIP = "172.30.0.43"
  5666  			svc.Spec.Ports = []v1.ServicePort{{
  5667  				Name:     "p8081",
  5668  				Port:     8081,
  5669  				Protocol: v1.ProtocolTCP,
  5670  			}}
  5671  		}),
  5672  	)
  5673  
  5674  	populateEndpointSlices(fp,
  5675  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5676  			eps.AddressType = discovery.AddressTypeIPv4
  5677  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  5678  			for i := range eps.Endpoints {
  5679  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)}
  5680  			}
  5681  			eps.Ports = []discovery.EndpointPort{{
  5682  				Name:     ptr.To("p80"),
  5683  				Port:     ptr.To[int32](80),
  5684  				Protocol: ptr.To(v1.ProtocolTCP),
  5685  			}}
  5686  		}),
  5687  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5688  			eps.AddressType = discovery.AddressTypeIPv4
  5689  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  5690  			for i := range eps.Endpoints {
  5691  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)}
  5692  			}
  5693  			eps.Ports = []discovery.EndpointPort{{
  5694  				Name:     ptr.To("p8080"),
  5695  				Port:     ptr.To[int32](8080),
  5696  				Protocol: ptr.To(v1.ProtocolTCP),
  5697  			}}
  5698  		}),
  5699  	)
  5700  
  5701  	fp.syncProxyRules()
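        	// The two slices hold largeClusterEndpointsThreshold/2-1 endpoints each,
        	// leaving the total just below the threshold, so per-endpoint comments
        	// should still be written.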
  5702  	expectedEndpoints := 2 * (largeClusterEndpointsThreshold/2 - 1)
  5703  
  5704  	firstEndpoint, numEndpoints, numComments := countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5705  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  5706  	if numEndpoints != expectedEndpoints {
  5707  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5708  	}
  5709  	if numComments != numEndpoints {
  5710  		t.Errorf("numComments (%d) != numEndpoints (%d) when numEndpoints < threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  5711  	}
  5712  
  5713  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5714  		eps.AddressType = discovery.AddressTypeIPv4
  5715  		eps.Endpoints = []discovery.Endpoint{{
  5716  			Addresses: []string{"203.0.113.4"},
  5717  		}, {
  5718  			Addresses: []string{"203.0.113.8"},
  5719  		}, {
  5720  			Addresses: []string{"203.0.113.12"},
  5721  		}}
  5722  		eps.Ports = []discovery.EndpointPort{{
  5723  			Name:     ptr.To("p8081"),
  5724  			Port:     ptr.To[int32](8081),
  5725  			Protocol: ptr.To(v1.ProtocolTCP),
  5726  		}}
  5727  	}))
  5728  	fp.syncProxyRules()
  5729  
  5730  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "203.0.113.4")
  5731  	assert.Equal(t, "-A KUBE-SEP-RUVVH7YV3PHQBDOS -m tcp -p tcp -j DNAT --to-destination 203.0.113.4:8081", firstEndpoint)
  5732  	// syncProxyRules will only have output the endpoints for svc3, since the others
  5733  	// didn't change (and syncProxyRules doesn't automatically do a full resync when you
  5734  	// cross the largeClusterEndpointsThreshold).
  5735  	if numEndpoints != 3 {
  5736  		t.Errorf("Found wrong number of endpoints on partial resync: expected %d, got %d", 3, numEndpoints)
  5737  	}
  5738  	if numComments != 0 {
  5739  		t.Errorf("numComments (%d) != 0 after partial resync when numEndpoints (%d) > threshold (%d)", numComments, expectedEndpoints+3, largeClusterEndpointsThreshold)
  5740  	}
  5741  
  5742  	// Now force a full resync and confirm that it rewrites the older services with
  5743  	// no comments as well.
  5744  	fp.forceSyncProxyRules()
  5745  	expectedEndpoints += 3
  5746  
  5747  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5748  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  5749  	if numEndpoints != expectedEndpoints {
  5750  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5751  	}
  5752  	if numComments != 0 {
  5753  		t.Errorf("numComments (%d) != 0 when numEndpoints (%d) > threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  5754  	}
  5755  
  5756  	// Now test service deletion; we have to create another service to do this though,
  5757  	// because if we deleted any of the existing services, we'd fall back out of large
  5758  	// cluster mode.
  5759  	svc4 := makeTestService("ns4", "svc4", func(svc *v1.Service) {
  5760  		svc.Spec.Type = v1.ServiceTypeClusterIP
  5761  		svc.Spec.ClusterIP = "172.30.0.44"
  5762  		svc.Spec.Ports = []v1.ServicePort{{
  5763  			Name:     "p8082",
  5764  			Port:     8082,
  5765  			Protocol: v1.ProtocolTCP,
  5766  		}}
  5767  	})
  5768  	fp.OnServiceAdd(svc4)
  5769  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  5770  		eps.AddressType = discovery.AddressTypeIPv4
  5771  		eps.Endpoints = []discovery.Endpoint{{
  5772  			Addresses: []string{"10.4.0.1"},
  5773  		}}
  5774  		eps.Ports = []discovery.EndpointPort{{
  5775  			Name:     ptr.To("p8082"),
  5776  			Port:     ptr.To[int32](8082),
  5777  			Protocol: ptr.To(v1.ProtocolTCP),
  5778  		}}
  5779  	}))
  5780  	fp.syncProxyRules()
  5781  
  5782  	svc4Endpoint, numEndpoints, _ := countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5783  	assert.Equal(t, "-A KUBE-SEP-SU5STNODRYEWJAUF -m tcp -p tcp -j DNAT --to-destination 10.4.0.1:8082", svc4Endpoint, "svc4 endpoint was not created")
  5784  	// should only sync svc4
  5785  	if numEndpoints != 1 {
  5786  		t.Errorf("Found wrong number of endpoints after svc4 creation: expected %d, got %d", 1, numEndpoints)
  5787  	}
  5788  
  5789  	// In large-cluster mode, deleting a service doesn't cause its chains to be
  5790  	// re-synced, but they aren't deleted immediately either; cleanup is deferred.
  5791  	fp.lastIPTablesCleanup = time.Now()
  5792  	fp.OnServiceDelete(svc4)
  5793  	fp.syncProxyRules()
  5794  
  5795  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5796  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint was still created!")
  5797  	// should only sync svc4, and shouldn't output its endpoints
  5798  	if numEndpoints != 0 {
  5799  		t.Errorf("Found wrong number of endpoints after service deletion: expected %d, got %d", 0, numEndpoints)
  5800  	}
  5801  	assert.NotContains(t, fp.iptablesData.String(), "-X ", "iptables data unexpectedly contains chain deletions")
  5802  
  5803  	// But resyncing after a long-enough delay will delete the stale chains
  5804  	fp.lastIPTablesCleanup = time.Now().Add(-fp.syncPeriod).Add(-1)
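        	// (The trailing Add(-1) subtracts one nanosecond, backdating the last
        	// cleanup to just over a full syncPeriod ago, which is enough for the
        	// next sync to delete the stale chains.)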
  5805  	fp.syncProxyRules()
  5806  
  5807  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5808  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint was still created!")
  5809  	if numEndpoints != 0 {
  5810  		t.Errorf("Found wrong number of endpoints after delayed resync: expected %d, got %d", 0, numEndpoints)
  5811  	}
  5812  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SVC-EBDQOQU5SJFXRIL3", "iptables data does not contain chain deletion")
  5813  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SEP-SU5STNODRYEWJAUF", "iptables data does not contain endpoint deletions")
  5814  
  5815  	// force a full sync and count
  5816  	fp.forceSyncProxyRules()
  5817  	_, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5818  	if numEndpoints != expectedEndpoints {
  5819  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5820  	}
  5821  }
  5822  
  5823  // Test calling syncProxyRules() multiple times with various changes
  5824  func TestSyncProxyRulesRepeated(t *testing.T) {
  5825  	ipt := iptablestest.NewFake()
  5826  	fp := NewFakeProxier(ipt)
  5827  	metrics.RegisterMetrics()
  5828  	defer legacyregistry.Reset()
  5829  
  5830  	// Create initial state
  5831  	var svc2 *v1.Service
  5832  
  5833  	makeServiceMap(fp,
  5834  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5835  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5836  			svc.Spec.ClusterIP = "172.30.0.41"
  5837  			svc.Spec.Ports = []v1.ServicePort{{
  5838  				Name:     "p80",
  5839  				Port:     80,
  5840  				Protocol: v1.ProtocolTCP,
  5841  			}}
  5842  		}),
  5843  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5844  			svc2 = svc
  5845  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5846  			svc.Spec.ClusterIP = "172.30.0.42"
  5847  			svc.Spec.Ports = []v1.ServicePort{{
  5848  				Name:     "p8080",
  5849  				Port:     8080,
  5850  				Protocol: v1.ProtocolTCP,
  5851  			}}
  5852  		}),
  5853  	)
  5854  
  5855  	populateEndpointSlices(fp,
  5856  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5857  			eps.AddressType = discovery.AddressTypeIPv4
  5858  			eps.Endpoints = []discovery.Endpoint{{
  5859  				Addresses: []string{"10.0.1.1"},
  5860  			}}
  5861  			eps.Ports = []discovery.EndpointPort{{
  5862  				Name:     ptr.To("p80"),
  5863  				Port:     ptr.To[int32](80),
  5864  				Protocol: ptr.To(v1.ProtocolTCP),
  5865  			}}
  5866  		}),
  5867  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5868  			eps.AddressType = discovery.AddressTypeIPv4
  5869  			eps.Endpoints = []discovery.Endpoint{{
  5870  				Addresses: []string{"10.0.2.1"},
  5871  			}}
  5872  			eps.Ports = []discovery.EndpointPort{{
  5873  				Name:     ptr.To("p8080"),
  5874  				Port:     ptr.To[int32](8080),
  5875  				Protocol: ptr.To(v1.ProtocolTCP),
  5876  			}}
  5877  		}),
  5878  	)
  5879  
  5880  	fp.syncProxyRules()
  5881  
  5882  	expected := dedent.Dedent(`
  5883  		*filter
  5884  		:KUBE-NODEPORTS - [0:0]
  5885  		:KUBE-SERVICES - [0:0]
  5886  		:KUBE-EXTERNAL-SERVICES - [0:0]
  5887  		:KUBE-FIREWALL - [0:0]
  5888  		:KUBE-FORWARD - [0:0]
  5889  		:KUBE-PROXY-FIREWALL - [0:0]
  5890  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  5891  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  5892  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  5893  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  5894  		COMMIT
  5895  		*nat
  5896  		:KUBE-NODEPORTS - [0:0]
  5897  		:KUBE-SERVICES - [0:0]
  5898  		:KUBE-MARK-MASQ - [0:0]
  5899  		:KUBE-POSTROUTING - [0:0]
  5900  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  5901  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  5902  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  5903  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  5904  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  5905  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  5906  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  5907  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  5908  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  5909  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  5910  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  5911  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  5912  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  5913  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -s 10.0.2.1 -j KUBE-MARK-MASQ
  5914  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -m tcp -p tcp -j DNAT --to-destination 10.0.2.1:8080
  5915  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  5916  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 -> 10.0.2.1:8080" -j KUBE-SEP-UHEGFW77JX3KXTOV
  5917  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  5918  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  5919  		COMMIT
  5920  		`)
  5921  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  5922  
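        	// countRulesFromLastSyncMetric reports the number of rules written by the
        	// most recent sync, while countRulesFromMetric reports the proxier's running
        	// total for the table; after this initial full sync both should match the
        	// dump above.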
  5923  	rulesSynced := countRules(utiliptables.TableNAT, expected)
  5924  	rulesSyncedMetric := countRulesFromLastSyncMetric(utiliptables.TableNAT)
  5925  	if rulesSyncedMetric != rulesSynced {
  5926  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  5927  	}
  5928  
  5929  	rulesTotal := rulesSynced
  5930  	rulesTotalMetric := countRulesFromMetric(utiliptables.TableNAT)
  5931  	if rulesTotalMetric != rulesTotal {
  5932  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  5933  	}
  5934  
  5935  	// Add a new service and its endpoints. (This will only sync the SVC and SEP rules
  5936  	// for the new service, not the existing ones.)
  5937  	makeServiceMap(fp,
  5938  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5939  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5940  			svc.Spec.ClusterIP = "172.30.0.43"
  5941  			svc.Spec.Ports = []v1.ServicePort{{
  5942  				Name:     "p80",
  5943  				Port:     80,
  5944  				Protocol: v1.ProtocolTCP,
  5945  			}}
  5946  		}),
  5947  	)
  5948  	var eps3 *discovery.EndpointSlice
  5949  	populateEndpointSlices(fp,
  5950  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5951  			eps3 = eps
  5952  			eps.AddressType = discovery.AddressTypeIPv4
  5953  			eps.Endpoints = []discovery.Endpoint{{
  5954  				Addresses: []string{"10.0.3.1"},
  5955  			}}
  5956  			eps.Ports = []discovery.EndpointPort{{
  5957  				Name:     ptr.To("p80"),
  5958  				Port:     ptr.To[int32](80),
  5959  				Protocol: ptr.To(v1.ProtocolTCP),
  5960  			}}
  5961  		}),
  5962  	)
  5963  	fp.syncProxyRules()
  5964  
  5965  	expected = dedent.Dedent(`
  5966  		*filter
  5967  		:KUBE-NODEPORTS - [0:0]
  5968  		:KUBE-SERVICES - [0:0]
  5969  		:KUBE-EXTERNAL-SERVICES - [0:0]
  5970  		:KUBE-FIREWALL - [0:0]
  5971  		:KUBE-FORWARD - [0:0]
  5972  		:KUBE-PROXY-FIREWALL - [0:0]
  5973  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  5974  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  5975  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  5976  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  5977  		COMMIT
  5978  		*nat
  5979  		:KUBE-NODEPORTS - [0:0]
  5980  		:KUBE-SERVICES - [0:0]
  5981  		:KUBE-MARK-MASQ - [0:0]
  5982  		:KUBE-POSTROUTING - [0:0]
  5983  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  5984  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  5985  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  5986  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  5987  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  5988  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  5989  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  5990  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  5991  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  5992  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  5993  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
  5994  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
  5995  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  5996  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
  5997  		COMMIT
  5998  		`)
  5999  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6000  
  6001  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6002  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6003  	if rulesSyncedMetric != rulesSynced {
  6004  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6005  	}
  6006  
  6007  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2
  6008  	// KUBE-SEP-BSWRHOQ77KEXZLNL rules.
  6009  	rulesTotal += 5
  6010  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6011  	if rulesTotalMetric != rulesTotal {
  6012  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6013  	}
  6014  
  6015  	// Delete a service. (Won't update the other services.)
  6016  	fp.OnServiceDelete(svc2)
  6017  	fp.syncProxyRules()
  6018  
  6019  	expected = dedent.Dedent(`
  6020  		*filter
  6021  		:KUBE-NODEPORTS - [0:0]
  6022  		:KUBE-SERVICES - [0:0]
  6023  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6024  		:KUBE-FIREWALL - [0:0]
  6025  		:KUBE-FORWARD - [0:0]
  6026  		:KUBE-PROXY-FIREWALL - [0:0]
  6027  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6028  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6029  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6030  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6031  		COMMIT
  6032  		*nat
  6033  		:KUBE-NODEPORTS - [0:0]
  6034  		:KUBE-SERVICES - [0:0]
  6035  		:KUBE-MARK-MASQ - [0:0]
  6036  		:KUBE-POSTROUTING - [0:0]
  6037  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  6038  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  6039  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6040  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6041  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6042  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6043  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6044  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6045  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6046  		-X KUBE-SEP-UHEGFW77JX3KXTOV
  6047  		-X KUBE-SVC-2VJB64SDSIJUP5T6
  6048  		COMMIT
  6049  		`)
  6050  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6051  
  6052  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6053  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6054  	if rulesSyncedMetric != rulesSynced {
  6055  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6056  	}
  6057  
  6058  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2
  6059  	// KUBE-SEP-UHEGFW77JX3KXTOV rules
  6060  	rulesTotal -= 5
  6061  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6062  	if rulesTotalMetric != rulesTotal {
  6063  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6064  	}
  6065  
  6066  	// Add a service, sync, then add its endpoints. (The first sync will be a no-op other
  6067  	// than adding the REJECT rule. The second sync will create the new service.)
  6068  	var svc4 *v1.Service
  6069  	makeServiceMap(fp,
  6070  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  6071  			svc4 = svc
  6072  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6073  			svc.Spec.ClusterIP = "172.30.0.44"
  6074  			svc.Spec.Ports = []v1.ServicePort{{
  6075  				Name:     "p80",
  6076  				Port:     80,
  6077  				Protocol: v1.ProtocolTCP,
  6078  			}}
  6079  		}),
  6080  	)
  6081  	fp.syncProxyRules()
  6082  	expected = dedent.Dedent(`
  6083  		*filter
  6084  		:KUBE-NODEPORTS - [0:0]
  6085  		:KUBE-SERVICES - [0:0]
  6086  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6087  		:KUBE-FIREWALL - [0:0]
  6088  		:KUBE-FORWARD - [0:0]
  6089  		:KUBE-PROXY-FIREWALL - [0:0]
  6090  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j REJECT
  6091  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6092  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6093  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6094  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6095  		COMMIT
  6096  		*nat
  6097  		:KUBE-NODEPORTS - [0:0]
  6098  		:KUBE-SERVICES - [0:0]
  6099  		:KUBE-MARK-MASQ - [0:0]
  6100  		:KUBE-POSTROUTING - [0:0]
  6101  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6102  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6103  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6104  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6105  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6106  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6107  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6108  		COMMIT
  6109  		`)
  6110  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6111  
  6112  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6113  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6114  	if rulesSyncedMetric != rulesSynced {
  6115  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6116  	}
  6117  
  6118  	// The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't
  6119  	// changed.
  6120  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6121  	if rulesTotalMetric != rulesTotal {
  6122  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6123  	}
  6124  
  6125  	populateEndpointSlices(fp,
  6126  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  6127  			eps.AddressType = discovery.AddressTypeIPv4
  6128  			eps.Endpoints = []discovery.Endpoint{{
  6129  				Addresses: []string{"10.0.4.1"},
  6130  			}}
  6131  			eps.Ports = []discovery.EndpointPort{{
  6132  				Name:     ptr.To("p80"),
  6133  				Port:     ptr.To[int32](80),
  6134  				Protocol: ptr.To(v1.ProtocolTCP),
  6135  			}}
  6136  		}),
  6137  	)
  6138  	fp.syncProxyRules()
  6139  	expected = dedent.Dedent(`
  6140  		*filter
  6141  		:KUBE-NODEPORTS - [0:0]
  6142  		:KUBE-SERVICES - [0:0]
  6143  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6144  		:KUBE-FIREWALL - [0:0]
  6145  		:KUBE-FORWARD - [0:0]
  6146  		:KUBE-PROXY-FIREWALL - [0:0]
  6147  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6148  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6149  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6150  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6151  		COMMIT
  6152  		*nat
  6153  		:KUBE-NODEPORTS - [0:0]
  6154  		:KUBE-SERVICES - [0:0]
  6155  		:KUBE-MARK-MASQ - [0:0]
  6156  		:KUBE-POSTROUTING - [0:0]
  6157  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6158  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6159  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6160  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6161  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6162  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6163  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6164  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6165  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6166  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6167  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
  6168  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
  6169  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6170  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
  6171  		COMMIT
  6172  		`)
  6173  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6174  
  6175  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6176  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6177  	if rulesSyncedMetric != rulesSynced {
  6178  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6179  	}
  6180  
  6181  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and
  6182  	// 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6183  	rulesTotal += 5
  6184  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6185  	if rulesTotalMetric != rulesTotal {
  6186  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6187  	}
  6188  
  6189  	// Change an endpoint of an existing service. This will cause its SVC and SEP
  6190  	// chains to be rewritten.
  6191  	eps3update := eps3.DeepCopy()
  6192  	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
  6193  	fp.OnEndpointSliceUpdate(eps3, eps3update)
  6194  	fp.syncProxyRules()
  6195  
  6196  	expected = dedent.Dedent(`
  6197  		*filter
  6198  		:KUBE-NODEPORTS - [0:0]
  6199  		:KUBE-SERVICES - [0:0]
  6200  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6201  		:KUBE-FIREWALL - [0:0]
  6202  		:KUBE-FORWARD - [0:0]
  6203  		:KUBE-PROXY-FIREWALL - [0:0]
  6204  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6205  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6206  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6207  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6208  		COMMIT
  6209  		*nat
  6210  		:KUBE-NODEPORTS - [0:0]
  6211  		:KUBE-SERVICES - [0:0]
  6212  		:KUBE-MARK-MASQ - [0:0]
  6213  		:KUBE-POSTROUTING - [0:0]
  6214  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  6215  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6216  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6217  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6218  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6219  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6220  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6221  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6222  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6223  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6224  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6225  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6226  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6227  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6228  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -j KUBE-SEP-DKCFIS26GWF2WLWC
  6229  		-X KUBE-SEP-BSWRHOQ77KEXZLNL
  6230  		COMMIT
  6231  		`)
  6232  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6233  
  6234  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6235  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6236  	if rulesSyncedMetric != rulesSynced {
  6237  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6238  	}
  6239  
  6240  	// We rewrote existing rules but did not change the overall number of rules.
  6241  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6242  	if rulesTotalMetric != rulesTotal {
  6243  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6244  	}
  6245  
  6246  	// Add an endpoint to a service. This will cause its SVC and SEP chains to be rewritten.
  6247  	eps3update2 := eps3update.DeepCopy()
  6248  	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
  6249  	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
  6250  	fp.syncProxyRules()
  6251  
  6252  	expected = dedent.Dedent(`
  6253  		*filter
  6254  		:KUBE-NODEPORTS - [0:0]
  6255  		:KUBE-SERVICES - [0:0]
  6256  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6257  		:KUBE-FIREWALL - [0:0]
  6258  		:KUBE-FORWARD - [0:0]
  6259  		:KUBE-PROXY-FIREWALL - [0:0]
  6260  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6261  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6262  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6263  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6264  		COMMIT
  6265  		*nat
  6266  		:KUBE-NODEPORTS - [0:0]
  6267  		:KUBE-SERVICES - [0:0]
  6268  		:KUBE-MARK-MASQ - [0:0]
  6269  		:KUBE-POSTROUTING - [0:0]
  6270  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6271  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6272  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6273  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6274  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6275  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6276  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6277  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6278  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6279  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6280  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6281  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6282  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6283  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6284  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6285  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6286  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6287  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6288  		COMMIT
  6289  		`)
  6290  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6291  
  6292  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6293  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6294  	if rulesSyncedMetric != rulesSynced {
  6295  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6296  	}
  6297  
  6298  	// We added 2 KUBE-SEP-JVVZVJ7BSEPPRNBS rules and 1 KUBE-SVC-X27LE4BHSL4DOUIK rule
  6299  	// jumping to the new SEP chain. The other rules related to svc3 got rewritten,
  6300  	// but that does not change the count of rules.
  6301  	rulesTotal += 3
  6302  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6303  	if rulesTotalMetric != rulesTotal {
  6304  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6305  	}
  6306  
  6307  	// Sync with no new changes... This will not rewrite any SVC or SEP chains
  6308  	fp.syncProxyRules()
  6309  
  6310  	expected = dedent.Dedent(`
  6311  		*filter
  6312  		:KUBE-NODEPORTS - [0:0]
  6313  		:KUBE-SERVICES - [0:0]
  6314  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6315  		:KUBE-FIREWALL - [0:0]
  6316  		:KUBE-FORWARD - [0:0]
  6317  		:KUBE-PROXY-FIREWALL - [0:0]
  6318  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6319  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6320  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6321  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6322  		COMMIT
  6323  		*nat
  6324  		:KUBE-NODEPORTS - [0:0]
  6325  		:KUBE-SERVICES - [0:0]
  6326  		:KUBE-MARK-MASQ - [0:0]
  6327  		:KUBE-POSTROUTING - [0:0]
  6328  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6329  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6330  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6331  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6332  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6333  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6334  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6335  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6336  		COMMIT
  6337  		`)
  6338  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6339  
  6340  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6341  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6342  	if rulesSyncedMetric != rulesSynced {
  6343  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6344  	}
  6345  
  6346  	// (No changes)
  6347  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6348  	if rulesTotalMetric != rulesTotal {
  6349  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6350  	}
  6351  
  6352  	// Now force a partial resync error and ensure that it recovers correctly
  6353  	if fp.needFullSync {
  6354  		t.Fatalf("Proxier unexpectedly already needs a full sync?")
  6355  	}
  6356  	partialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
  6357  	if err != nil {
  6358  		t.Fatalf("Could not get partial restore failures metric: %v", err)
  6359  	}
  6360  	if partialRestoreFailures != 0.0 {
  6361  		t.Errorf("Already did a partial resync? Something failed earlier!")
  6362  	}
  6363  
  6364  	// Add a rule jumping from svc3's service chain to svc4's endpoint, then try to
  6365  	// delete svc4. This will fail because the partial resync won't rewrite svc3's
  6366  	// rules and so the partial restore would leave a dangling jump from there to
  6367  	// svc4's endpoint. The proxier will then queue a full resync in response to the
  6368  	// partial resync failure, and the full resync will succeed (since it will rewrite
  6369  	// svc3's rules as well).
  6370  	//
  6371  	// This is an absurd scenario, but it has to be: partial resync failures
  6372  	// are supposed to be impossible, so if we knew of any non-absurd scenario
  6373  	// that could cause such a failure, that would be a bug and we would fix it.
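	//
	// (A sketch of the bad state being constructed here: the rule added below,
	//
	//	-A KUBE-SVC-X27LE4BHSL4DOUIK -j KUBE-SEP-AYCN5HPXMIRJNJXU
	//
	// lives in a chain that the partial resync will not rewrite, while deleting
	// svc4 asks the restore to delete the KUBE-SEP-AYCN5HPXMIRJNJXU chain
	// itself; iptables-restore refuses to delete a chain that is still jumped
	// to, so the whole partial restore fails.)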
  6374  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6375  		t.Fatalf("svc4's endpoint chain unexpectedly does not exist!")
  6376  	}
  6377  	if _, err := fp.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.Chain("KUBE-SVC-X27LE4BHSL4DOUIK"), "-j", "KUBE-SEP-AYCN5HPXMIRJNJXU"); err != nil {
  6378  		t.Fatalf("Could not add bad iptables rule: %v", err)
  6379  	}
  6380  
  6381  	fp.OnServiceDelete(svc4)
  6382  	fp.syncProxyRules()
  6383  
  6384  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6385  		t.Errorf("svc4's endpoint chain was successfully deleted despite dangling references!")
  6386  	}
  6387  	if !fp.needFullSync {
  6388  		t.Errorf("Proxier did not fail on previous partial resync?")
  6389  	}
  6390  	updatedPartialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
  6391  	if err != nil {
  6392  		t.Errorf("Could not get partial restore failures metric: %v", err)
  6393  	}
  6394  	if updatedPartialRestoreFailures != partialRestoreFailures+1.0 {
  6395  		t.Errorf("Partial restore failures metric was not incremented after failed partial resync (expected %.02f, got %.02f)", partialRestoreFailures+1.0, updatedPartialRestoreFailures)
  6396  	}
  6397  
  6398  	// On retry we should do a full resync, which should succeed (and delete svc4)
  6399  	fp.syncProxyRules()
  6400  
  6401  	expected = dedent.Dedent(`
  6402  		*filter
  6403  		:KUBE-NODEPORTS - [0:0]
  6404  		:KUBE-SERVICES - [0:0]
  6405  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6406  		:KUBE-FIREWALL - [0:0]
  6407  		:KUBE-FORWARD - [0:0]
  6408  		:KUBE-PROXY-FIREWALL - [0:0]
  6409  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6410  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6411  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6412  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6413  		COMMIT
  6414  		*nat
  6415  		:KUBE-NODEPORTS - [0:0]
  6416  		:KUBE-SERVICES - [0:0]
  6417  		:KUBE-MARK-MASQ - [0:0]
  6418  		:KUBE-POSTROUTING - [0:0]
  6419  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6420  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6421  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6422  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  6423  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6424  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6425  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  6426  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6427  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6428  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6429  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6430  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6431  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6432  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6433  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6434  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6435  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6436  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6437  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  6438  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  6439  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6440  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6441  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6442  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6443  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  6444  		-X KUBE-SEP-AYCN5HPXMIRJNJXU
  6445  		-X KUBE-SVC-4SW47YFZTEDKD3PK
  6446  		COMMIT
  6447  		`)
  6448  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6449  
  6450  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6451  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6452  	if rulesSyncedMetric != rulesSynced {
  6453  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6454  	}
  6455  
  6456  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2
  6457  	// KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6458  	rulesTotal -= 5
  6459  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6460  	if rulesTotalMetric != rulesTotal {
  6461  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6462  	}
  6463  }
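
// countRules, countRulesFromMetric, and countRulesFromLastSyncMetric are
// helpers defined earlier in this file. As an illustrative sketch only (not
// the actual helpers), counting the rules in one table of an iptables-save
// style dump amounts to counting the "-A" lines between that table's "*name"
// header and its "COMMIT":
//
//	func countRulesSketch(table, save string) int {
//		n, inTable := 0, false
//		for _, line := range strings.Split(save, "\n") {
//			trimmed := strings.TrimSpace(line)
//			switch {
//			case trimmed == "*"+table:
//				inTable = true
//			case trimmed == "COMMIT":
//				inTable = false
//			case inTable && strings.HasPrefix(trimmed, "-A "):
//				n++
//			}
//		}
//		return n
//	}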
  6464  
  6465  func TestNoEndpointsMetric(t *testing.T) {
  6466  	type endpoint struct {
  6467  		ip       string
  6468  		hostname string
  6469  	}
  6470  
  6471  	metrics.RegisterMetrics()
  6472  	testCases := []struct {
  6473  		name                                                string
  6474  		internalTrafficPolicy                               *v1.ServiceInternalTrafficPolicy
  6475  		externalTrafficPolicy                               v1.ServiceExternalTrafficPolicy
  6476  		endpoints                                           []endpoint
  6477  		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
  6478  		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
  6479  	}{
  6480  		{
  6481  			name:                  "internalTrafficPolicy is set and there are local endpoints",
  6482  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6483  			endpoints: []endpoint{
  6484  				{"10.0.1.1", testHostname},
  6485  				{"10.0.1.2", "host1"},
  6486  				{"10.0.1.3", "host2"},
  6487  			},
  6488  		},
  6489  		{
  6490  			name:                  "externalTrafficPolicy is set and there are local endpoints",
  6491  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6492  			endpoints: []endpoint{
  6493  				{"10.0.1.1", testHostname},
  6494  				{"10.0.1.2", "host1"},
  6495  				{"10.0.1.3", "host2"},
  6496  			},
  6497  		},
  6498  		{
  6499  			name:                  "both policies are set and there are local endpoints",
  6500  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6501  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6502  			endpoints: []endpoint{
  6503  				{"10.0.1.1", testHostname},
  6504  				{"10.0.1.2", "host1"},
  6505  				{"10.0.1.3", "host2"},
  6506  			},
  6507  		},
  6508  		{
  6509  			name:                  "internalTrafficPolicy is set and there are no local endpoints",
  6510  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6511  			endpoints: []endpoint{
  6512  				{"10.0.1.1", "host0"},
  6513  				{"10.0.1.2", "host1"},
  6514  				{"10.0.1.3", "host2"},
  6515  			},
  6516  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6517  		},
  6518  		{
  6519  			name:                  "externalTrafficPolicy is set and there are no local endpoints",
  6520  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6521  			endpoints: []endpoint{
  6522  				{"10.0.1.1", "host0"},
  6523  				{"10.0.1.2", "host1"},
  6524  				{"10.0.1.3", "host2"},
  6525  			},
  6526  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6527  		},
  6528  		{
  6529  			name:                  "both policies are set and there are no local endpoints",
  6530  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6531  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6532  			endpoints: []endpoint{
  6533  				{"10.0.1.1", "host0"},
  6534  				{"10.0.1.2", "host1"},
  6535  				{"10.0.1.3", "host2"},
  6536  			},
  6537  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6538  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6539  		},
  6540  		{
  6541  			name:                  "both policies are set and there are no endpoints at all",
  6542  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6543  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6544  			endpoints:             []endpoint{},
  6545  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
  6546  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
  6547  		},
  6548  	}
  6549  
  6550  	for _, tc := range testCases {
  6551  		t.Run(tc.name, func(t *testing.T) {
  6552  			ipt := iptablestest.NewFake()
  6553  			fp := NewFakeProxier(ipt)
  6554  			fp.OnServiceSynced()
  6555  			fp.OnEndpointSlicesSynced()
  6556  
  6557  			serviceName := "svc1"
  6558  			namespaceName := "ns1"
  6559  
  6560  			svc := &v1.Service{
  6561  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  6562  				Spec: v1.ServiceSpec{
  6563  					ClusterIP: "172.30.1.1",
  6564  					Selector:  map[string]string{"foo": "bar"},
  6565  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
  6566  				},
  6567  			}
  6568  			if tc.internalTrafficPolicy != nil {
  6569  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  6570  			}
  6571  			if tc.externalTrafficPolicy != "" {
  6572  				svc.Spec.Type = v1.ServiceTypeNodePort
  6573  				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
  6574  			}
  6575  
  6576  			fp.OnServiceAdd(svc)
  6577  
  6578  			endpointSlice := &discovery.EndpointSlice{
  6579  				ObjectMeta: metav1.ObjectMeta{
  6580  					Name:      fmt.Sprintf("%s-1", serviceName),
  6581  					Namespace: namespaceName,
  6582  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  6583  				},
  6584  				Ports: []discovery.EndpointPort{{
  6585  					Name:     ptr.To(""),
  6586  					Port:     ptr.To[int32](80),
  6587  					Protocol: ptr.To(v1.ProtocolTCP),
  6588  				}},
  6589  				AddressType: discovery.AddressTypeIPv4,
  6590  			}
  6591  			for _, ep := range tc.endpoints {
  6592  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  6593  					Addresses:  []string{ep.ip},
  6594  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  6595  					NodeName:   ptr.To(ep.hostname),
  6596  				})
  6597  			}
  6598  
  6599  			fp.OnEndpointSliceAdd(endpointSlice)
  6600  			fp.syncProxyRules()
  6601  			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
  6602  			if err != nil {
  6603  				t.Errorf("failed to get %s value (internal), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6604  			}
  6605  
  6606  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
  6607  				t.Errorf("%s metric mismatch (internal): got %d, expected %d", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
  6608  			}
  6609  
  6610  			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
  6611  			if err != nil {
  6612  				t.Errorf("failed to get %s value (external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6613  			}
  6614  
  6615  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
  6616  				t.Errorf("%s metric mismatch (external): got %d, expected %d", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
  6617  			}
  6618  		})
  6619  	}
  6620  }
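
// A usage note on the gauge exercised above: on each sync,
// SyncProxyRulesNoLocalEndpointsTotal records, per traffic-policy label
// ("internal" or "external"), how many services with a Local policy had no
// local endpoint to route to. As the last test case shows, a service with no
// endpoints at all is deliberately not counted, since no other policy could
// have routed its traffic either. Reading a single label is just:
//
//	v, err := testutil.GetGaugeMetricValue(
//		metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))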
  6621  
  6622  func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
  6623  	testCases := []struct {
  6624  		name          string
  6625  		ipModeEnabled bool
  6626  		svcIP         string
  6627  		svcLBIP       string
  6628  		ipMode        *v1.LoadBalancerIPMode
  6629  		expectedRule  bool
  6630  	}{
  6631  		/* LoadBalancerIPMode disabled */
  6632  		{
  6633  			name:          "LoadBalancerIPMode disabled, ipMode Proxy",
  6634  			ipModeEnabled: false,
  6635  			svcIP:         "10.20.30.41",
  6636  			svcLBIP:       "1.2.3.4",
  6637  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6638  			expectedRule:  true,
  6639  		},
  6640  		{
  6641  			name:          "LoadBalancerIPMode disabled, ipMode VIP",
  6642  			ipModeEnabled: false,
  6643  			svcIP:         "10.20.30.42",
  6644  			svcLBIP:       "1.2.3.5",
  6645  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6646  			expectedRule:  true,
  6647  		},
  6648  		{
  6649  			name:          "LoadBalancerIPMode disabled, ipMode nil",
  6650  			ipModeEnabled: false,
  6651  			svcIP:         "10.20.30.43",
  6652  			svcLBIP:       "1.2.3.6",
  6653  			ipMode:        nil,
  6654  			expectedRule:  true,
  6655  		},
  6656  		/* LoadBalancerIPMode enabled */
  6657  		{
  6658  			name:          "LoadBalancerIPMode enabled, ipMode Proxy",
  6659  			ipModeEnabled: true,
  6660  			svcIP:         "10.20.30.41",
  6661  			svcLBIP:       "1.2.3.4",
  6662  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6663  			expectedRule:  false,
  6664  		},
  6665  		{
  6666  			name:          "LoadBalancerIPMode enabled, ipMode VIP",
  6667  			ipModeEnabled: true,
  6668  			svcIP:         "10.20.30.42",
  6669  			svcLBIP:       "1.2.3.5",
  6670  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6671  			expectedRule:  true,
  6672  		},
  6673  		{
  6674  			name:          "LoadBalancerIPMode enabled, ipMode nil",
  6675  			ipModeEnabled: true,
  6676  			svcIP:         "10.20.30.43",
  6677  			svcLBIP:       "1.2.3.6",
  6678  			ipMode:        nil,
  6679  			expectedRule:  true,
  6680  		},
  6681  	}
  6682  
  6683  	svcPort := 80
  6684  	svcNodePort := 3001
  6685  	svcPortName := proxy.ServicePortName{
  6686  		NamespacedName: makeNSN("ns1", "svc1"),
  6687  		Port:           "p80",
  6688  		Protocol:       v1.ProtocolTCP,
  6689  	}
  6690  
  6691  	for _, testCase := range testCases {
  6692  		t.Run(testCase.name, func(t *testing.T) {
  6693  			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)()
  6694  			ipt := iptablestest.NewFake()
  6695  			fp := NewFakeProxier(ipt)
  6696  			makeServiceMap(fp,
  6697  				makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  6698  					svc.Spec.Type = "LoadBalancer"
  6699  					svc.Spec.ClusterIP = testCase.svcIP
  6700  					svc.Spec.Ports = []v1.ServicePort{{
  6701  						Name:     svcPortName.Port,
  6702  						Port:     int32(svcPort),
  6703  						Protocol: v1.ProtocolTCP,
  6704  						NodePort: int32(svcNodePort),
  6705  					}}
  6706  					svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  6707  						IP:     testCase.svcLBIP,
  6708  						IPMode: testCase.ipMode,
  6709  					}}
  6710  				}),
  6711  			)
  6712  
  6713  			populateEndpointSlices(fp,
  6714  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  6715  					eps.AddressType = discovery.AddressTypeIPv4
  6716  					eps.Endpoints = []discovery.Endpoint{{
  6717  						Addresses: []string{"10.180.0.1"},
  6718  					}}
  6719  					eps.Ports = []discovery.EndpointPort{{
  6720  						Name:     ptr.To("p80"),
  6721  						Port:     ptr.To[int32](80),
  6722  						Protocol: ptr.To(v1.ProtocolTCP),
  6723  					}}
  6724  				}),
  6725  			)
  6726  
  6727  			fp.syncProxyRules()
  6728  
  6729  			c, _ := ipt.Dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
  6730  			ruleExists := false
  6731  			for _, r := range c.Rules {
  6732  				if r.DestinationAddress != nil && r.DestinationAddress.Value == testCase.svcLBIP {
  6733  					ruleExists = true
  6734  				}
  6735  			}
  6736  			if ruleExists != testCase.expectedRule {
  6737  				t.Errorf("expected rule=%v for LB IP %s, got rule=%v", testCase.expectedRule, testCase.svcLBIP, ruleExists)
  6738  			}
  6739  		})
  6740  	}
  6741  }
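
// A note on the expectations above: when the LoadBalancerIPMode feature gate
// is enabled and the cloud reports an ingress IP with ipMode "Proxy", the
// load balancer terminates traffic itself, so kube-proxy deliberately skips
// installing a KUBE-SERVICES rule for that IP and packets to it are not
// short-circuited on the node (expectedRule: false). With the gate disabled,
// or with ipMode "VIP" or unset, the LB IP is handled like any other
// external IP (expectedRule: true).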
  6742  