import difflib
import json
import logging
import os
import random
import re
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple

import pytest
import yaml

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s test %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

logger = logging.getLogger("ambassador")
logger.setLevel(logging.DEBUG)

from ambassador import IR, Cache, Config, EnvoyConfig
from ambassador.fetch import ResourceFetcher
from ambassador.utils import NullSecretHandler
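

# Builder mimics Ambassador's reconfiguration cycle for these tests: it
# accumulates Kubernetes-style resources and their deltas, then compiles them
# into an IR and an Envoy config (optionally through a Cache), much as diagd's
# _load_ir path does.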
class Builder:
    def __init__(
        self, logger: logging.Logger, tmpdir: Path, yaml_file: str, enable_cache=True
    ) -> None:
        self.logger = logger

        self.test_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testdata")

        self.cache: Optional[Cache] = None

        if enable_cache:
            self.cache = Cache(logger)

        # This is a brutal hack: we load all the YAML, store it as objects, then
        # build IR and econf from the re-serialized YAML from these resources.
        # The reason is that it's kind of the only way we can apply deltas in
        # a meaningful way.
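        #
        # Note that self.deltas is keyed by resource, so only the most recent
        # change to each resource is handed to the next build(); build() then
        # clears the deltas.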
        self.resources: Dict[str, Any] = {}
        self.deltas: Dict[str, Any] = {}

        # Load the initial YAML.
        self.apply_yaml(yaml_file, allow_updates=False)
        self.secret_handler = NullSecretHandler(
            logger, str(tmpdir / "secrets" / "src"), str(tmpdir / "secrets" / "cache"), "0"
        )

        # Save builds to make this simpler to call.
        self.builds: List[Tuple[IR, EnvoyConfig]] = []

    def current_yaml(self) -> str:
        return yaml.safe_dump_all(list(self.resources.values()))

    def apply_yaml(self, yaml_file: str, allow_updates=True) -> None:
        with open(os.path.join(self.test_dir, yaml_file), "r") as f:
            yaml_data = f.read()

        self.apply_yaml_string(yaml_data, allow_updates)

    def apply_yaml_string(self, yaml_data: str, allow_updates=True) -> None:
        for rsrc in yaml.safe_load_all(yaml_data):
            # We require kind, metadata.name, and metadata.namespace here.
            kind = rsrc["kind"]
            metadata = rsrc["metadata"]
            name = metadata["name"]
            namespace = metadata["namespace"]

            key = f"{kind}-v2-{name}-{namespace}"

            dtype = "add"

            if key in self.resources:
                # This is an attempted update.
                if not allow_updates:
                    raise RuntimeError(f"Cannot update {key}")

                dtype = "update"

                # if self.cache is not None:
                #     self.cache.invalidate(key)

            self.resources[key] = rsrc
            self.deltas[key] = {
                "kind": kind,
                "apiVersion": rsrc["apiVersion"],
                "metadata": {
                    "name": name,
                    "namespace": namespace,
                    "creationTimestamp": metadata.get("creationTimestamp", "2021-11-19T15:11:45Z"),
                },
                "deltaType": dtype,
            }

    def delete_yaml(self, yaml_file: str) -> None:
        with open(os.path.join(self.test_dir, yaml_file), "r") as f:
            yaml_data = f.read()

        self.delete_yaml_string(yaml_data)

    def delete_yaml_string(self, yaml_data: str) -> None:
        for rsrc in yaml.safe_load_all(yaml_data):
            # We require kind, metadata.name, and metadata.namespace here.
            kind = rsrc["kind"]
            metadata = rsrc["metadata"]
            name = metadata["name"]
            namespace = metadata["namespace"]

            key = f"{kind}-v2-{name}-{namespace}"

            if key in self.resources:
                del self.resources[key]

                # if self.cache is not None:
                #     self.cache.invalidate(key)

                self.deltas[key] = {
                    "kind": kind,
                    "apiVersion": rsrc["apiVersion"],
                    "metadata": {
                        "name": name,
                        "namespace": namespace,
                        "creationTimestamp": metadata.get(
                            "creationTimestamp", "2021-11-19T15:11:45Z"
                        ),
                    },
                    "deltaType": "delete",
                }

    def build(self) -> Tuple[IR, EnvoyConfig]:
        # Do a build, return IR & econf, but also stash them in self.builds.

        watt: Dict[str, Any] = {"Kubernetes": {}, "Deltas": list(self.deltas.values())}

        # Clear deltas for the next build.
        self.deltas = {}

        # The Ambassador resource types are all valid keys in the Kubernetes dict.
        # Other things (e.g. if this test gets expanded to cover Ingress or Secrets)
        # may not be.

        for rsrc in self.resources.values():
            kind = rsrc["kind"]

            if kind not in watt["Kubernetes"]:
                watt["Kubernetes"][kind] = []

            watt["Kubernetes"][kind].append(rsrc)

        watt_json = json.dumps(watt, sort_keys=True, indent=4)

        self.logger.debug(f"Watt JSON:\n{watt_json}")

        # OK, we have the WATT-formatted JSON. This next bit of code largely duplicates
        # _load_ir from diagd.
        #
        # XXX That obviously means that it should be factored out for reuse.

        # Grab a new aconf, and use a new ResourceFetcher to load it up.
        aconf = Config()

        fetcher = ResourceFetcher(self.logger, aconf)
        fetcher.parse_watt(watt_json)

        aconf.load_all(fetcher.sorted())

        # Next up: What kind of reconfiguration are we doing?
        config_type, reset_cache, invalidate_groups_for = IR.check_deltas(
            self.logger, fetcher, self.cache
        )

        # For the tests in this file, we should see cache resets and full reconfigurations
        # IFF we have no cache.

        if self.cache is None:
            assert (
                config_type == "complete"
            ), "check_deltas wants an incremental reconfiguration with no cache, which it shouldn't"
            assert (
                reset_cache
            ), "check_deltas with no cache does not want to reset the cache, but it should"
        else:
            assert (
                config_type == "incremental"
            ), "check_deltas with a cache wants a complete reconfiguration, which it shouldn't"
            assert (
                not reset_cache
            ), "check_deltas with a cache wants to reset the cache, which it shouldn't"

        # Once that's done, compile the IR.
        ir = IR(
            aconf,
            logger=self.logger,
            cache=self.cache,
            invalidate_groups_for=invalidate_groups_for,
            file_checker=lambda path: True,
            secret_handler=self.secret_handler,
        )

        assert ir, "could not create an IR"

        econf = EnvoyConfig.generate(ir, cache=self.cache)

        assert econf, "could not create an econf"

        self.builds.append((ir, econf))

        return ir, econf

    def invalidate(self, key) -> None:
        if self.cache is not None:
            assert self.cache[key] is not None, f"key {key} is not cached"

            self.cache.invalidate(key)

    def check(
        self,
        what: str,
        b1: Tuple[IR, EnvoyConfig],
        b2: Tuple[IR, EnvoyConfig],
        strip_cache_keys=False,
    ) -> bool:
        for kind, idx in [("IR", 0), ("econf", 1)]:
            if strip_cache_keys and (idx == 0):
                x1 = self.strip_cache_keys(b1[idx].as_dict())
                j1 = json.dumps(x1, sort_keys=True, indent=4)

                x2 = self.strip_cache_keys(b2[idx].as_dict())
                j2 = json.dumps(x2, sort_keys=True, indent=4)
            else:
                j1 = b1[idx].as_json()
                j2 = b2[idx].as_json()

            match = j1 == j2

            output = ""

            if not match:
                l1 = j1.split("\n")
                l2 = j2.split("\n")

                n1 = f"{what} {kind} 1"
                n2 = f"{what} {kind} 2"

                output += "\n--------\n"

                for line in difflib.context_diff(l1, l2, fromfile=n1, tofile=n2):
                    line = line.rstrip()
                    output += line
                    output += "\n"

            assert match, output

        return match

    def check_last(self, what: str) -> None:
        build_count = len(self.builds)

        b1 = self.builds[build_count - 2]
        b2 = self.builds[build_count - 1]

        self.check(what, b1, b2)

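    # Recursively drop "_cache_key" annotations so that IRs from cached and
    # uncached builds can be compared as plain dicts.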
    def strip_cache_keys(self, node: Any) -> Any:
        if isinstance(node, dict):
            output = {}
            for k, v in node.items():
                if k == "_cache_key":
                    continue

                output[k] = self.strip_cache_keys(v)

            return output
        elif isinstance(node, list):
            return [self.strip_cache_keys(x) for x in node]

        return node


@pytest.mark.compilertest
def test_circular_link(tmp_path):
    builder = Builder(logger, tmp_path, "cache_test_1.yaml")
    builder.build()
    assert builder.cache

    # This Can't Happen(tm) in Ambassador, but it's important that it not go
    # off the rails. Find a Mapping...
    mapping_key = "Mapping-v2-foo-4-default"
    m = builder.cache[mapping_key]
    assert m

    # ...then walk the link chain until we get to a V3-Cluster.
    worklist = [m.cache_key]
    cluster_key: Optional[str] = None

    while worklist:
        key = worklist.pop(0)

        if key.startswith("V3-Cluster"):
            cluster_key = key
            break

        if key in builder.cache.links:
            for owned in builder.cache.links[key]:
                worklist.append(owned)

    assert cluster_key is not None, f"No V3-Cluster linked from {m}?"

    c = builder.cache[cluster_key]

    assert c is not None, f"No V3-Cluster in the cache for {cluster_key}"

    builder.cache.link(c, m)
    builder.cache.invalidate(mapping_key)

    builder.build()
    builder.check_last("after invalidating circular links")


@pytest.mark.compilertest
def test_multiple_rebuilds(tmp_path):
    builder = Builder(logger, tmp_path, "cache_test_1.yaml")

    for i in range(10):
        builder.build()

        if i > 0:
            builder.check_last(f"rebuild {i-1} -> {i}")


@pytest.mark.compilertest
def test_simple_targets(tmp_path):
    builder = Builder(logger, tmp_path, "cache_test_1.yaml")

    builder.build()
    builder.build()

    builder.check_last("immediate rebuild")

    builder.invalidate("Mapping-v2-foo-4-default")

    builder.build()

    builder.check_last("after delete foo-4")


@pytest.mark.compilertest
def test_smashed_targets(tmp_path):
    builder = Builder(logger, tmp_path, "cache_test_2.yaml")

    builder.build()
    builder.build()

    builder.check_last("immediate rebuild")

    # Invalidate two things that share common links.
    builder.invalidate("Mapping-v2-foo-4-default")
    builder.invalidate("Mapping-v2-foo-6-default")

    builder.build()

    builder.check_last("after invalidating foo-4 and foo-6")


@pytest.mark.compilertest
def test_delta_1(tmp_path):
    builder1 = Builder(logger, tmp_path, "cache_test_1.yaml")
    builder2 = Builder(logger, tmp_path, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    builder1.apply_yaml("cache_delta_1.yaml")
    builder2.apply_yaml("cache_delta_1.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after delta", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, tmp_path, "cache_result_1.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delta_2(tmp_path):
    builder1 = Builder(logger, tmp_path, "cache_test_2.yaml")
    builder2 = Builder(logger, tmp_path, "cache_test_2.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    builder1.apply_yaml("cache_delta_2.yaml")
    builder2.apply_yaml("cache_delta_2.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after delta", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, tmp_path, "cache_result_2.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delta_3(tmp_path):
    builder1 = Builder(logger, tmp_path, "cache_test_1.yaml")
    builder2 = Builder(logger, tmp_path, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Load up five delta files and apply them in a random order.
    deltas = [f"cache_random_{i}.yaml" for i in [1, 2, 3, 4, 5]]
    random.shuffle(deltas)

    for delta in deltas:
        builder1.apply_yaml(delta)
        builder2.apply_yaml(delta)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after deltas", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, tmp_path, "cache_result_3.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delete_4(tmp_path):
    builder1 = Builder(logger, tmp_path, "cache_test_1.yaml")
    builder2 = Builder(logger, tmp_path, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Delete a resource.
    builder1.delete_yaml("cache_delta_1.yaml")
    builder2.delete_yaml("cache_delta_1.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after deletion", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, tmp_path, "cache_result_4.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_long_cluster_1(tmp_path):
    # Create a cache for Mappings whose cluster names are too long
    # to be envoy cluster names and must be truncated.
    builder1 = Builder(logger, tmp_path, "cache_test_3.yaml")
    builder2 = Builder(logger, tmp_path, "cache_test_3.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    print("checking baseline...")
    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Apply the second Mapping, make sure we use the same cached cluster
    builder1.apply_yaml("cache_delta_3.yaml")
    builder2.apply_yaml("cache_delta_3.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    print("checking after apply...")
    builder1.check("after apply", b1, b2, strip_cache_keys=True)

    print("test_long_cluster_1 done")


@pytest.mark.compilertest
def test_mappings_same_name_delta(tmp_path):
    # Tests that multiple Mappings with the same name (but in different namespaces)
    # are properly added/removed from the cache when they are updated.
    builder = Builder(logger, tmp_path, "cache_test_4.yaml")
    b = builder.build()
    econf = b[1].as_dict()

    # Loop through all the clusters in the resulting Envoy config and pick out two
    # Mappings from our test set (the first and the last) to ensure their clusters
    # were generated properly.
    cluster1_ok = False
    cluster2_ok = False
    for cluster in econf["static_resources"]["clusters"]:
        cname = cluster.get("name", None)
        assert cname is not None, "Error: cluster missing name in econf"
        # The 6666 in the cluster name comes from the Mapping.spec.service's port.
        if cname == "cluster_bar_0_example_com_6666_bar0":
            cluster1_ok = True
        elif cname == "cluster_bar_9_example_com_6666_bar9":
            cluster2_ok = True
        if cluster1_ok and cluster2_ok:
            break
    assert cluster1_ok and cluster2_ok, "expected clusters not found in the Envoy config"

    # Update the YAML for these Mappings to simulate a reconfiguration.
    # We should properly remove the cache entries for these clusters when that happens.
    builder.apply_yaml("cache_test_5.yaml")
    b = builder.build()
    econf = b[1].as_dict()

    cluster1_ok = False
    cluster2_ok = False
    for cluster in econf["static_resources"]["clusters"]:
        cname = cluster.get("name", None)
        assert cname is not None, "Error: cluster missing name in econf"
        # We can check the cluster name to identify whether the clusters were updated
        # properly, because the deltas in the YAML we applied changed the port to 7777.
        # If there was an issue removing the initial entries from the cache, we would
        # still see 6666 in this field and not find the cluster names below.
        if cname == "cluster_bar_0_example_com_7777_bar0":
            cluster1_ok = True
        elif cname == "cluster_bar_9_example_com_7777_bar9":
            cluster2_ok = True
        if cluster1_ok and cluster2_ok:
            break
    assert (
        cluster1_ok and cluster2_ok
    ), "clusters could not be found with the correct econf after updating their config"

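# A MadnessVerifier inspects a single (IR, EnvoyConfig) build. Verifiers assert
# on failure; their boolean return value merely short-circuits the madness loop
# (see MadnessOp.exec below).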
MadnessVerifier = Callable[[Tuple[IR, EnvoyConfig]], bool]


class MadnessMapping:
    name: str
    pfx: str
    service: str

    def __init__(self, name, pfx, svc) -> None:
        self.name = name
        self.pfx = pfx
        self.service = svc

        # This is only OK for service names without any weirdnesses.
        self.cluster = "cluster_" + re.sub(r"[^0-9A-Za-z_]", "_", self.service) + "_default"

    def __str__(self) -> str:
        return f"MadnessMapping {self.name}: {self.pfx} => {self.service}"

    def yaml(self) -> str:
        return f"""
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: {self.name}
  namespace: default
spec:
  prefix: {self.pfx}
  service: {self.service}
"""

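# A MadnessOp applies or deletes a single MadnessMapping on both builders, checks
# that the cached and uncached builds still agree, and then runs its verifiers
# against both builds.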
class MadnessOp:
    name: str
    op: str
    mapping: MadnessMapping
    verifiers: List[MadnessVerifier]
    tmpdir: Path

    def __init__(
        self,
        name: str,
        op: str,
        mapping: MadnessMapping,
        verifiers: List[MadnessVerifier],
        tmpdir: Path,
    ) -> None:
        self.name = name
        self.op = op
        self.mapping = mapping
        self.verifiers = verifiers
        self.tmpdir = tmpdir

    def __str__(self) -> str:
        return self.name

    def exec(self, builder1: Builder, builder2: Builder, dumpfile: Optional[str] = None) -> bool:
        verifiers: List[MadnessVerifier] = []

        if self.op == "apply":
            builder1.apply_yaml_string(self.mapping.yaml())
            builder2.apply_yaml_string(self.mapping.yaml())

            verifiers.append(self._cluster_present)
        elif self.op == "delete":
            builder1.delete_yaml_string(self.mapping.yaml())
            builder2.delete_yaml_string(self.mapping.yaml())

            verifiers.append(self._cluster_absent)
        else:
            raise Exception(f"Unknown op {self.op}")

        logger.info("======== builder1:")
        logger.info("INPUT: %s" % builder1.current_yaml())

        b1 = builder1.build()

        logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))

        logger.info("======== builder2:")
        logger.info("INPUT: %s" % builder2.current_yaml())

        b2 = builder2.build()

        logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))

        if dumpfile:
            with open(str(self.tmpdir / f"{dumpfile}-1.json"), "w") as f:
                json.dump(b1[0].as_dict(), f, indent=2, sort_keys=True)
            with open(str(self.tmpdir / f"{dumpfile}-2.json"), "w") as f:
                json.dump(b2[0].as_dict(), f, indent=2, sort_keys=True)

        if not builder1.check(self.name, b1, b2, strip_cache_keys=True):
            return False

        verifiers += self.verifiers

        for v in verifiers:
            # for b in [ b1 ]:
            for b in [b1, b2]:
                # The verifiers are meant to do assertions. The return value is
                # about short-circuiting the loop, not logging the errors.
                if not v(b):
                    return False

        return True

    def _cluster_present(self, b: Tuple[IR, EnvoyConfig]) -> bool:
        ir, econf = b

        ir_has_cluster = ir.has_cluster(self.mapping.cluster)
        assert (
            ir_has_cluster
        ), f"{self.name}: needed IR cluster {self.mapping.cluster}, have only {', '.join(ir.clusters.keys())}"

        return ir_has_cluster

    def _cluster_absent(self, b: Tuple[IR, EnvoyConfig]) -> bool:
        ir, econf = b

        ir_has_cluster = ir.has_cluster(self.mapping.cluster)
        assert (
            not ir_has_cluster
        ), f"{self.name}: needed no IR cluster {self.mapping.cluster}, but found it"

        return not ir_has_cluster

    def check_group(
        self, b: Tuple[IR, EnvoyConfig], current_mappings: Dict[MadnessMapping, bool]
    ) -> bool:
        ir, econf = b
        match = False

        group = ir.groups.get("3644d75eb336f323bec43e48d4cfd8a950157607", None)

        if current_mappings:
            # There are some active mappings. Make sure that the group exists, that it has the
            # correct mappings, and that the mappings have sane weights.
            assert (
                group
            ), f"{self.name}: needed group 3644d75eb336f323bec43e48d4cfd8a950157607, but none found"

            # We expect the mappings to be sorted in the group, because every change to the
            # mappings that are part of the group should result in the whole group being torn
            # down and recreated.
            wanted_services = sorted([m.service for m in current_mappings.keys()])
            found_services = [m.service for m in group.mappings]

            match1 = wanted_services == found_services
            assert (
                match1
            ), f"{self.name}: wanted services {wanted_services}, but found {found_services}"

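            # Weights in a group are cumulative percentages: with N mappings,
            # mapping i gets (i + 1) * (100 // N), and the last is pinned to 100.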
            weight_delta = 100 // len(current_mappings)
            wanted_weights: List[int] = [
                (i + 1) * weight_delta for i in range(len(current_mappings))
            ]
            wanted_weights[-1] = 100

            found_weights: List[int] = [m._weight for m in group.mappings]

            match2 = wanted_weights == found_weights
            assert (
                match2
            ), f"{self.name}: wanted weights {wanted_weights}, but found {found_weights}"

            return match1 and match2
        else:
            # There are no active mappings, so make sure that the group doesn't exist.
            assert (
                not group
            ), f"{self.name}: needed no group 3644d75eb336f323bec43e48d4cfd8a950157607, but found one"
            match = True

        return match


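# Randomly apply and delete Mappings for 100 iterations, checking after every
# step that the cached and uncached builders still produce identical output.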
@pytest.mark.compilertest
def test_cache_madness(tmp_path):
    builder1 = Builder(logger, tmp_path, "/dev/null")
    builder2 = Builder(logger, tmp_path, "/dev/null", enable_cache=False)

    logger.info("======== builder1:")
    logger.info("INPUT: %s" % builder1.current_yaml())

    b1 = builder1.build()

    logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))

    logger.info("======== builder2:")
    logger.info("INPUT: %s" % builder2.current_yaml())

    b2 = builder2.build()

    logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # We're going to mix and match some changes to the config,
    # in a random order.

    all_mappings = [
        MadnessMapping("mapping1", "/foo/", "service1"),
        MadnessMapping("mapping2", "/foo/", "service2"),
        MadnessMapping("mapping3", "/foo/", "service3"),
        MadnessMapping("mapping4", "/foo/", "service4"),
        MadnessMapping("mapping5", "/foo/", "service5"),
    ]

    current_mappings: Dict[MadnessMapping, bool] = {}

    # grunge = [ all_mappings[i] for i in [ 0, 3, 2 ] ]

    # for i in range(len(grunge)):
    #     mapping = grunge[i]

    for i in range(0, 100):
        mapping = random.choice(all_mappings)
        op: MadnessOp

        if mapping in current_mappings:
            del current_mappings[mapping]
            op = MadnessOp(
                name=f"delete {mapping.pfx} -> {mapping.service}",
                op="delete",
                mapping=mapping,
                verifiers=[lambda b: op.check_group(b, current_mappings)],
                tmpdir=tmp_path,
            )
        else:
            current_mappings[mapping] = True
            op = MadnessOp(
                name=f"apply {mapping.pfx} -> {mapping.service}",
                op="apply",
                mapping=mapping,
                verifiers=[lambda b: op.check_group(b, current_mappings)],
                tmpdir=tmp_path,
            )

        print(
            "==== EXEC %d: %s => %s" % (i, op, sorted([m.service for m in current_mappings.keys()]))
        )
        logger.info("======== EXEC %d: %s", i, op)

        # if not op.exec(builder1, None, dumpfile=f"ir{i}"):
        if not op.exec(builder1, builder2, dumpfile=f"ir{i}"):
            break


if __name__ == "__main__":
    pytest.main(sys.argv)