import difflib
import json
import logging
import os
import random
import re
import sys
from typing import Any, Callable, Dict, List, Optional, OrderedDict, Tuple

import pytest
import yaml

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s test %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

logger = logging.getLogger("ambassador")
logger.setLevel(logging.DEBUG)

from ambassador import IR, Cache, Config, EnvoyConfig
from ambassador.fetch import ResourceFetcher
from ambassador.utils import NullSecretHandler


class Builder:
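    # Builder loads YAML resources, tracks add/update/delete deltas, and builds
    # IR and Envoy config from them, optionally through a Cache, so that the
    # tests below can compare incremental (cached) rebuilds against complete
    # (uncached) ones.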
    def __init__(self, logger: logging.Logger, yaml_file: str, enable_cache=True) -> None:
        self.logger = logger

        self.test_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testdata")

        self.cache: Optional[Cache] = None

        if enable_cache:
            self.cache = Cache(logger)

        # This is a brutal hack: we load all the YAML, store it as objects, then
        # build IR and econf from the re-serialized YAML from these resources.
        # The reason is that it's kind of the only way we can apply deltas in
        # a meaningful way.
        self.resources: Dict[str, Any] = {}
        self.deltas: Dict[str, Any] = {}

        # Load the initial YAML.
        self.apply_yaml(yaml_file, allow_updates=False)
        self.secret_handler = NullSecretHandler(
            logger, "/tmp/secrets/src", "/tmp/secrets/cache", "0"
        )

        # Save builds to make this simpler to call.
        self.builds: List[Tuple[IR, EnvoyConfig]] = []

    def current_yaml(self) -> str:
        return yaml.safe_dump_all(list(self.resources.values()))

    def apply_yaml(self, yaml_file: str, allow_updates=True) -> None:
        with open(os.path.join(self.test_dir, yaml_file), "r") as f:
            yaml_data = f.read()

        self.apply_yaml_string(yaml_data, allow_updates)

    def apply_yaml_string(self, yaml_data: str, allow_updates=True) -> None:
        for rsrc in yaml.safe_load_all(yaml_data):
            # We require kind, metadata.name, and metadata.namespace here.
            kind = rsrc["kind"]
            metadata = rsrc["metadata"]
            name = metadata["name"]
            namespace = metadata["namespace"]

            key = f"{kind}-v2-{name}-{namespace}"
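            # e.g. "Mapping-v2-foo-4-default"; delete_yaml_string and the tests
            # below rely on this same key format.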

            dtype = "add"

            if key in self.resources:
                # This is an attempted update.
                if not allow_updates:
                    raise RuntimeError(f"Cannot update {key}")

                dtype = "update"

                # if self.cache is not None:
                #     self.cache.invalidate(key)

            self.resources[key] = rsrc
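            # Each recorded delta mimics a watt-style Kubernetes delta, e.g.
            # (hypothetical resource shown):
            #
            #     {"kind": "Mapping", "apiVersion": "getambassador.io/v3alpha1",
            #      "metadata": {"name": "foo-4", "namespace": "default",
            #                   "creationTimestamp": "..."},
            #      "deltaType": "add"}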
            self.deltas[key] = {
                "kind": kind,
                "apiVersion": rsrc["apiVersion"],
                "metadata": {
                    "name": name,
                    "namespace": namespace,
                    "creationTimestamp": metadata.get("creationTimestamp", "2021-11-19T15:11:45Z"),
                },
                "deltaType": dtype,
            }

    def delete_yaml(self, yaml_file: str) -> None:
        with open(os.path.join(self.test_dir, yaml_file), "r") as f:
            yaml_data = f.read()

        self.delete_yaml_string(yaml_data)

    def delete_yaml_string(self, yaml_data: str) -> None:
        for rsrc in yaml.safe_load_all(yaml_data):
            # We require kind, metadata.name, and metadata.namespace here.
            kind = rsrc["kind"]
            metadata = rsrc["metadata"]
            name = metadata["name"]
            namespace = metadata["namespace"]

            key = f"{kind}-v2-{name}-{namespace}"

            if key in self.resources:
                del self.resources[key]

                # if self.cache is not None:
                #     self.cache.invalidate(key)

                self.deltas[key] = {
                    "kind": kind,
                    "apiVersion": rsrc["apiVersion"],
                    "metadata": {
                        "name": name,
                        "namespace": namespace,
                        "creationTimestamp": metadata.get(
                            "creationTimestamp", "2021-11-19T15:11:45Z"
                        ),
                    },
                    "deltaType": "delete",
                }

    def build(self, version="V2") -> Tuple[IR, EnvoyConfig]:
        # Do a build, return IR & econf, but also stash them in self.builds.

        watt: Dict[str, Any] = {"Kubernetes": {}, "Deltas": list(self.deltas.values())}
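        # This mirrors the watt snapshot shape that ResourceFetcher.parse_watt
        # consumes below: full resource sets under "Kubernetes", plus the deltas
        # accumulated since the last build.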

        # Clear deltas for the next build.
        self.deltas = {}

        # The Ambassador resource types are all valid keys in the Kubernetes dict.
        # Other things (e.g. if this test gets expanded to cover Ingress or Secrets)
        # may not be.

        for rsrc in self.resources.values():
            kind = rsrc["kind"]

            if kind not in watt["Kubernetes"]:
                watt["Kubernetes"][kind] = []

            watt["Kubernetes"][kind].append(rsrc)

        watt_json = json.dumps(watt, sort_keys=True, indent=4)

        self.logger.debug(f"Watt JSON:\n{watt_json}")

        # OK, we have the WATT-formatted JSON. This next bit of code largely duplicates
        # _load_ir from diagd.
        #
        # XXX That obviously means that it should be factored out for reuse.

        # Grab a new aconf, and use a new ResourceFetcher to load it up.
        aconf = Config()

        fetcher = ResourceFetcher(self.logger, aconf)
        fetcher.parse_watt(watt_json)

        aconf.load_all(fetcher.sorted())

        # Next up: What kind of reconfiguration are we doing?
        config_type, reset_cache, invalidate_groups_for = IR.check_deltas(
            self.logger, fetcher, self.cache
        )

        # For the tests in this file, we should see cache resets and full reconfigurations
        # IFF we have no cache.

        if self.cache is None:
            assert (
                config_type == "complete"
            ), "check_deltas wants an incremental reconfiguration with no cache, which it shouldn't"
            assert (
                reset_cache
            ), "check_deltas with no cache does not want to reset the cache, but it should"
        else:
            assert (
                config_type == "incremental"
            ), "check_deltas with a cache wants a complete reconfiguration, which it shouldn't"
            assert (
                not reset_cache
            ), "check_deltas with a cache wants to reset the cache, which it shouldn't"

        # Once that's done, compile the IR.
        ir = IR(
            aconf,
            logger=self.logger,
            cache=self.cache,
            invalidate_groups_for=invalidate_groups_for,
            file_checker=lambda path: True,
            secret_handler=self.secret_handler,
        )

        assert ir, "could not create an IR"

        econf = EnvoyConfig.generate(ir, version, cache=self.cache)

        assert econf, "could not create an econf"

        self.builds.append((ir, econf))

        return ir, econf

    def invalidate(self, key) -> None:
        if self.cache is not None:
            assert self.cache[key] is not None, f"key {key} is not cached"

            self.cache.invalidate(key)

    def check(
        self,
        what: str,
        b1: Tuple[IR, EnvoyConfig],
        b2: Tuple[IR, EnvoyConfig],
        strip_cache_keys=False,
    ) -> bool:
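        # Compare the IR and econf of two builds as JSON. On a mismatch, assert
        # with a context diff so the differing fields are easy to spot.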
        for kind, idx in [("IR", 0), ("econf", 1)]:
            if strip_cache_keys and (idx == 0):
                x1 = self.strip_cache_keys(b1[idx].as_dict())
                j1 = json.dumps(x1, sort_keys=True, indent=4)

                x2 = self.strip_cache_keys(b2[idx].as_dict())
                j2 = json.dumps(x2, sort_keys=True, indent=4)
            else:
                j1 = b1[idx].as_json()
                j2 = b2[idx].as_json()

            match = j1 == j2

            output = ""

            if not match:
                l1 = j1.split("\n")
                l2 = j2.split("\n")

                n1 = f"{what} {kind} 1"
                n2 = f"{what} {kind} 2"

                output += "\n--------\n"

                for line in difflib.context_diff(l1, l2, fromfile=n1, tofile=n2):
                    line = line.rstrip()
                    output += line
                    output += "\n"

            assert match, output

        return match

    def check_last(self, what: str) -> None:
        build_count = len(self.builds)

        b1 = self.builds[build_count - 2]
        b2 = self.builds[build_count - 1]

        self.check(what, b1, b2)

    def strip_cache_keys(self, node: Any) -> Any:
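        # Recursively drop "_cache_key" fields so that cached and uncached
        # builds can be compared field-for-field.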
        if isinstance(node, dict):
            output = {}
            for k, v in node.items():
                if k == "_cache_key":
                    continue

                output[k] = self.strip_cache_keys(v)

            return output
        elif isinstance(node, list):
            return [self.strip_cache_keys(x) for x in node]

        return node


@pytest.mark.compilertest
def test_circular_link():
    builder = Builder(logger, "cache_test_1.yaml")
    builder.build()

    # This Can't Happen(tm) in Ambassador, but it's important that it not go
    # off the rails. Find a Mapping...
    mapping_key = "Mapping-v2-foo-4-default"
    m = builder.cache[mapping_key]

    # ...then walk the link chain until we get to a V2-Cluster.
    worklist = [m.cache_key]
    cluster_key: Optional[str] = None

    while worklist:
        key = worklist.pop(0)

        if key.startswith("V2-Cluster"):
            cluster_key = key
            break

        if key in builder.cache.links:
            for owned in builder.cache.links[key]:
                worklist.append(owned)

    assert cluster_key is not None, f"No V2-Cluster linked from {m}?"

    c = builder.cache[cluster_key]

    assert c is not None, f"No V2-Cluster in the cache for key {cluster_key}"

    builder.cache.link(c, m)
    builder.cache.invalidate(mapping_key)

    builder.build()
    builder.check_last("after invalidating circular links")


@pytest.mark.compilertest
def test_multiple_rebuilds():
    builder = Builder(logger, "cache_test_1.yaml")

    for i in range(10):
        builder.build()

        if i > 0:
            builder.check_last(f"rebuild {i-1} -> {i}")


@pytest.mark.compilertest
def test_simple_targets():
    builder = Builder(logger, "cache_test_1.yaml")

    builder.build()
    builder.build()

    builder.check_last("immediate rebuild")

    builder.invalidate("Mapping-v2-foo-4-default")

    builder.build()

    builder.check_last("after invalidating foo-4")


@pytest.mark.compilertest
def test_smashed_targets():
    builder = Builder(logger, "cache_test_2.yaml")

    builder.build()
    builder.build()

    builder.check_last("immediate rebuild")

    # Invalidate two things that share common links.
    builder.invalidate("Mapping-v2-foo-4-default")
    builder.invalidate("Mapping-v2-foo-6-default")

    builder.build()

    builder.check_last("after invalidating foo-4 and foo-6")


@pytest.mark.compilertest
def test_delta_1():
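    # Pattern for the delta tests: build the same config with and without a
    # cache, apply the same delta to both, and require that the two builds
    # match each other and a from-scratch build of the expected final config.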
    builder1 = Builder(logger, "cache_test_1.yaml")
    builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    builder1.apply_yaml("cache_delta_1.yaml")
    builder2.apply_yaml("cache_delta_1.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after delta", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, "cache_result_1.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delta_2():
    builder1 = Builder(logger, "cache_test_2.yaml")
    builder2 = Builder(logger, "cache_test_2.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    builder1.apply_yaml("cache_delta_2.yaml")
    builder2.apply_yaml("cache_delta_2.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after delta", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, "cache_result_2.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delta_3():
    builder1 = Builder(logger, "cache_test_1.yaml")
    builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Load up five delta files and apply them in a random order.
    deltas = [f"cache_random_{i}.yaml" for i in [1, 2, 3, 4, 5]]
    random.shuffle(deltas)

    for delta in deltas:
        builder1.apply_yaml(delta)
        builder2.apply_yaml(delta)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after deltas", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, "cache_result_3.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_delete_4():
    builder1 = Builder(logger, "cache_test_1.yaml")
    builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Delete a resource.
    builder1.delete_yaml("cache_delta_1.yaml")
    builder2.delete_yaml("cache_delta_1.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    builder1.check("after deletion", b1, b2, strip_cache_keys=True)

    builder3 = Builder(logger, "cache_result_4.yaml")
    b3 = builder3.build()

    builder3.check("final", b3, b1)


@pytest.mark.compilertest
def test_long_cluster_1():
    # Create a cache for Mappings whose cluster names are too long
    # to be Envoy cluster names and so must be truncated.
    builder1 = Builder(logger, "cache_test_3.yaml")
    builder2 = Builder(logger, "cache_test_3.yaml", enable_cache=False)

    b1 = builder1.build()
    b2 = builder2.build()

    print("checking baseline...")
    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # Apply the second Mapping, and make sure we reuse the same cached cluster.
    builder1.apply_yaml("cache_delta_3.yaml")
    builder2.apply_yaml("cache_delta_3.yaml")

    b1 = builder1.build()
    b2 = builder2.build()

    print("checking after apply...")
    builder1.check("after apply", b1, b2, strip_cache_keys=True)

    print("test_long_cluster_1 done")


@pytest.mark.compilertest
def test_mappings_same_name_delta():
    # Tests that multiple Mappings with the same name (but in different namespaces)
    # are properly added to and removed from the cache when they are updated.
    builder = Builder(logger, "cache_test_4.yaml")
    b = builder.build()
    econf = b[1]
    econf = econf.as_dict()

    # Loop through all the clusters in the resulting Envoy config and pick out two
    # Mappings from our test set (the first and the last) to ensure their clusters
    # were generated properly.
    cluster1_ok = False
    cluster2_ok = False
    for cluster in econf["static_resources"]["clusters"]:
        cname = cluster.get("name", None)
        assert cname is not None, "cluster in econf is missing its name"
        # The 6666 in the cluster name comes from the Mapping.spec.service's port.
        if cname == "cluster_bar_0_example_com_6666_bar0":
            cluster1_ok = True
        elif cname == "cluster_bar_9_example_com_6666_bar9":
            cluster2_ok = True
        if cluster1_ok and cluster2_ok:
            break
    assert cluster1_ok and cluster2_ok, "expected clusters not found in the envoy config"

    # Update the YAML for these Mappings to simulate a reconfiguration.
    # The cache entries for these clusters should be removed when that happens.
    builder.apply_yaml("cache_test_5.yaml")
    b = builder.build()
    econf = b[1]
    econf = econf.as_dict()

    cluster1_ok = False
    cluster2_ok = False
    for cluster in econf["static_resources"]["clusters"]:
        cname = cluster.get("name", None)
        assert cname is not None, "cluster in econf is missing its name"
        # We can check the cluster names to verify that the clusters were updated
        # properly, because the deltas in the YAML we applied changed the port to
        # 7777. If the initial entries were not removed from the cache, we would
        # still see 6666 in this field and would not find the cluster names below.
        if cname == "cluster_bar_0_example_com_7777_bar0":
            cluster1_ok = True
        elif cname == "cluster_bar_9_example_com_7777_bar9":
            cluster2_ok = True
        if cluster1_ok and cluster2_ok:
            break
    assert (
        cluster1_ok and cluster2_ok
    ), "expected clusters not found in the econf after updating their config"


MadnessVerifier = Callable[[Tuple[IR, EnvoyConfig]], bool]


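# The "madness" test at the bottom of this file randomly applies and deletes
# Mappings that all share the prefix /foo/ (so they collapse into a single
# mapping group), checking at every step that a cached Builder and an uncached
# Builder stay in agreement.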
class MadnessMapping:
    name: str
    pfx: str
    service: str

    def __init__(self, name, pfx, svc) -> None:
        self.name = name
        self.pfx = pfx
        self.service = svc

        # This is only OK for service names without any weirdnesses.
        self.cluster = "cluster_" + re.sub(r"[^0-9A-Za-z_]", "_", self.service) + "_default"

    def __str__(self) -> str:
        return f"MadnessMapping {self.name}: {self.pfx} => {self.service}"

    def yaml(self) -> str:
        return f"""
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: {self.name}
  namespace: default
spec:
  prefix: {self.pfx}
  service: {self.service}
"""


class MadnessOp:
    name: str
    op: str
    mapping: MadnessMapping
    verifiers: List[MadnessVerifier]

    def __init__(
        self, name: str, op: str, mapping: MadnessMapping, verifiers: List[MadnessVerifier]
    ) -> None:
        self.name = name
        self.op = op
        self.mapping = mapping
        self.verifiers = verifiers

    def __str__(self) -> str:
        return self.name

    def exec(self, builder1: Builder, builder2: Builder, dumpfile: Optional[str] = None) -> bool:
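        # Apply or delete this op's Mapping on both builders, rebuild both,
        # check that the builds match, and then run every verifier against
        # each build. Returns False as soon as anything disagrees.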
        verifiers: List[MadnessVerifier] = []

        if self.op == "apply":
            builder1.apply_yaml_string(self.mapping.yaml())
            builder2.apply_yaml_string(self.mapping.yaml())

            verifiers.append(self._cluster_present)
        elif self.op == "delete":
            builder1.delete_yaml_string(self.mapping.yaml())
            builder2.delete_yaml_string(self.mapping.yaml())

            verifiers.append(self._cluster_absent)
        else:
            raise Exception(f"Unknown op {self.op}")

        logger.info("======== builder1:")
        logger.info("INPUT: %s" % builder1.current_yaml())

        b1 = builder1.build()

        logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))

        logger.info("======== builder2:")
        logger.info("INPUT: %s" % builder2.current_yaml())

        b2 = builder2.build()

        logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))

        if dumpfile:
            with open(f"/tmp/{dumpfile}-1.json", "w") as f:
                json.dump(b1[0].as_dict(), f, indent=2, sort_keys=True)
            with open(f"/tmp/{dumpfile}-2.json", "w") as f:
                json.dump(b2[0].as_dict(), f, indent=2, sort_keys=True)

        if not builder1.check(self.name, b1, b2, strip_cache_keys=True):
            return False

        verifiers += self.verifiers

        for v in verifiers:
            # for b in [ b1 ]:
            for b in [b1, b2]:
                # The verifiers are meant to do assertions. The return value is
                # about short-circuiting the loop, not logging the errors.
                if not v(b):
                    return False

        return True

    def _cluster_present(self, b: Tuple[IR, EnvoyConfig]) -> bool:
        ir, econf = b

        ir_has_cluster = ir.has_cluster(self.mapping.cluster)
        assert (
            ir_has_cluster
        ), f"{self.name}: needed IR cluster {self.mapping.cluster}, have only {', '.join(ir.clusters.keys())}"

        return ir_has_cluster

    def _cluster_absent(self, b: Tuple[IR, EnvoyConfig]) -> bool:
        ir, econf = b

        ir_has_cluster = ir.has_cluster(self.mapping.cluster)
        assert (
            not ir_has_cluster
        ), f"{self.name}: needed no IR cluster {self.mapping.cluster}, but found it"

        return not ir_has_cluster

    def check_group(
        self, b: Tuple[IR, EnvoyConfig], current_mappings: Dict[MadnessMapping, bool]
    ) -> bool:
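        # Verify the single /foo/ mapping group against current_mappings: it
        # must exist with the expected services and weights while any mappings
        # are active, and must be gone once none are.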
        ir, econf = b
        match = False

        group = ir.groups.get("3644d75eb336f323bec43e48d4cfd8a950157607", None)

        if current_mappings:
            # There are some active mappings. Make sure that the group exists, that it has the
            # correct mappings, and that the mappings have sane weights.
            assert (
                group
            ), f"{self.name}: needed group 3644d75eb336f323bec43e48d4cfd8a950157607, but none found"

            # We expect the mappings to be sorted in the group, because every change to the
            # mappings that are part of the group should result in the whole group being torn
            # down and recreated.
            wanted_services = sorted([m.service for m in current_mappings.keys()])
            found_services = [m.service for m in group.mappings]

            match1 = wanted_services == found_services
            assert (
                match1
            ), f"{self.name}: wanted services {wanted_services}, but found {found_services}"

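            # Weights are cumulative: with N mappings, mapping i gets weight
            # (i + 1) * (100 // N), and the last is pinned to exactly 100.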
            weight_delta = 100 // len(current_mappings)
            wanted_weights: List[int] = [
                (i + 1) * weight_delta for i in range(len(current_mappings))
            ]
            wanted_weights[-1] = 100

            found_weights: List[int] = [m._weight for m in group.mappings]

            match2 = wanted_weights == found_weights
            assert (
                match2
            ), f"{self.name}: wanted weights {wanted_weights}, but found {found_weights}"

            return match1 and match2
        else:
            # There are no active mappings, so make sure that the group doesn't exist.
            assert (
                not group
            ), f"{self.name}: needed no group 3644d75eb336f323bec43e48d4cfd8a950157607, but found one"
            match = True

        return match


@pytest.mark.compilertest
def test_cache_madness():
    builder1 = Builder(logger, "/dev/null")
    builder2 = Builder(logger, "/dev/null", enable_cache=False)

    logger.info("======== builder1:")
    logger.info("INPUT: %s" % builder1.current_yaml())

    b1 = builder1.build()

    logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))

    logger.info("======== builder2:")
    logger.info("INPUT: %s" % builder2.current_yaml())

    b2 = builder2.build()

    logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))

    builder1.check("baseline", b1, b2, strip_cache_keys=True)

    # We're going to mix and match some changes to the config,
    # in a random order.

    all_mappings = [
        MadnessMapping("mapping1", "/foo/", "service1"),
        MadnessMapping("mapping2", "/foo/", "service2"),
        MadnessMapping("mapping3", "/foo/", "service3"),
        MadnessMapping("mapping4", "/foo/", "service4"),
        MadnessMapping("mapping5", "/foo/", "service5"),
    ]

    current_mappings: OrderedDict[MadnessMapping, bool] = {}

    # grunge = [ all_mappings[i] for i in [ 0, 3, 2 ] ]

    # for i in range(len(grunge)):
    #     mapping = grunge[i]

    for i in range(0, 100):
        mapping = random.choice(all_mappings)
        op: MadnessOp

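        # Note that the lambdas below close over `op` and `current_mappings` by
        # reference: they are only called inside op.exec, after `op` has been
        # fully constructed and current_mappings reflects this step.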
        if mapping in current_mappings:
            del current_mappings[mapping]
            op = MadnessOp(
                name=f"delete {mapping.pfx} -> {mapping.service}",
                op="delete",
                mapping=mapping,
                verifiers=[lambda b: op.check_group(b, current_mappings)],
            )
        else:
            current_mappings[mapping] = True
            op = MadnessOp(
                name=f"apply {mapping.pfx} -> {mapping.service}",
                op="apply",
                mapping=mapping,
                verifiers=[lambda b: op.check_group(b, current_mappings)],
            )

        print(
            "==== EXEC %d: %s => %s" % (i, op, sorted([m.service for m in current_mappings.keys()]))
        )
        logger.info("======== EXEC %d: %s", i, op)

        # if not op.exec(builder1, None, dumpfile=f"ir{i}"):
        if not op.exec(builder1, builder2, dumpfile=f"ir{i}"):
            break


if __name__ == "__main__":
    pytest.main(sys.argv)