package integration

import (
	"context"
	_ "embed"
	"fmt"
	"os"
	"testing"
	"time"

	"gotest.tools/v3/assert/cmp"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/kustomize/api/filters/namespace"

	"edge-infra.dev/pkg/f8n/warehouse/capability"
	"edge-infra.dev/pkg/f8n/warehouse/cluster"
	whv1 "edge-infra.dev/pkg/f8n/warehouse/k8s/apis/v1alpha2"
	"edge-infra.dev/pkg/f8n/warehouse/k8s/controllers/lumperctl"
	"edge-infra.dev/pkg/f8n/warehouse/k8s/controllers/lumperctl/internal"
	"edge-infra.dev/pkg/f8n/warehouse/oci"
	"edge-infra.dev/pkg/f8n/warehouse/oci/cache"
	"edge-infra.dev/pkg/f8n/warehouse/oci/layer"
	"edge-infra.dev/pkg/f8n/warehouse/pallet"
	"edge-infra.dev/pkg/k8s/object"
	"edge-infra.dev/pkg/k8s/runtime/conditions"
	"edge-infra.dev/pkg/k8s/runtime/inventory"
	"edge-infra.dev/pkg/k8s/unstructured"
	"edge-infra.dev/pkg/lib/fog"
	"edge-infra.dev/test/f2"
	"edge-infra.dev/test/f2/x/ktest"
	"edge-infra.dev/test/f2/x/warehouse"
)

// TODO(wg-testing): update TestXXX funcs to call t.Parallel() once f2 supports

// f is the package-wide f2 test framework instance, initialized in TestMain.
var f f2.Framework

// testPallet represents a pallet test fixture that is parsed + pushed to the
// test registry
type testPallet struct {
	inv    *inventory.ResourceInventory // expected inventory of objects in the pallet's layers
	digest string                       // artifact digest, computed after building
	name   string
	tag    string
	pallet pallet.Pallet
}

// Test fixtures
var (
	//go:embed testdata/minimal-pallet-v1
	minimalv1 []byte
	//go:embed testdata/minimal-pallet-v2
	minimalv2 []byte
	//go:embed testdata/fake-infra.yaml
	fakeInfra []byte
	//go:embed testdata/fake-mesh-config.yaml
	fakeMeshConfig []byte
	// NOTE(review): "Capablity" is a typo for "Capability"; kept as-is because
	// sibling files in this package may reference it — rename in a follow-up.
	//go:embed testdata/linkerdctl/linkerd.yaml
	linkerdCapablityProvider []byte
	//go:embed testdata/meshconfig
	meshConfig []byte
	//go:embed testdata/sappers/removes-fields-initial.yaml
	sapInitial []byte
	//go:embed testdata/sappers/removes-fields-after.yaml
	sapFinal []byte

	l5dCap capability.Capability = "linkerd"
)

// fixtureSemVer is the version stamped into test pallet metadata; keep in sync
// with pkgMeta.
const fixtureSemVer = "0.2.5-test.1711660462+commit.d34db33f"

// TestMain boots the f2 framework with a ktest-managed control plane and a
// test OCI registry, registers the lumperctl controllers against that
// environment, and then runs the package's tests.
func TestMain(m *testing.M) {
	// TODO(aw18576): logr interface impl for t.Log() and let framework set up?
	// Set global loggers for controller-runtime and other pkgs so that our
	// reconcilers log during test
	lumperctl.SetPackageLoggers(fog.New())

	healthzChecker := internal.New(ctrl.Log.WithName("lumper-liveness"))

	f = f2.New(context.Background(),
		f2.WithExtensions(
			ktest.New(
				ktest.WithCtrlManager(lumperctl.CreateMgr(healthzChecker)),
				ktest.WithMetricsAddress(8080),
			),
			&warehouse.Registry{},
		),
	).
		Setup(func(ctx f2.Context) (f2.Context, error) {
			k, err := ktest.FromContext(ctx)
			if err != nil {
				return ctx, err
			}

			// Override timeouts if we aren't using a live cluster
			if !*k.Env.UseExistingCluster {
				k.Timeout = 5 * time.Second
				k.Tick = 10 * time.Millisecond
			}

			warehouseCache, err := cache.New(
				cache.WithMemoryCacheSize(250),
			)
			if err != nil {
				return ctx, err
			}

			// NOTE: Will be nil if ktest.WithCtrlManager was not called, put manager
			// behind k.Manager() and return error or something? Really shouldn't
			// happen but people can be dense
			if err := lumperctl.RegisterControllers(
				k.Manager,
				&lumperctl.Config{
					Provider:  cluster.Generic, // TODO: should test all variations of providers
					SvcAct:    "lumperctl",
					Namespace: k.Namespace,
					UpCfg: lumperctl.UnpackedPalletCfg{
						Concurrent:         32,
						DepRequeueInterval: k.Tick,
					},
					ShipCfg: lumperctl.ShipmentCfg{
						Concurrent: 8,
					},
					Cache: warehouseCache,
				},
				healthzChecker,
			); err != nil {
				return ctx, err
			}

			return ctx, nil
		})

	os.Exit(f.Run(m))
}

// unpackedPallet builds an UnpackedPallet resource pointing at test pallet p
// in the test registry, with timeouts derived from the ktest tick so waits
// stay fast. Additional opts override the defaults.
func unpackedPallet(
	ctx f2.Context,
	t *testing.T,
	name string,
	p pallet.Pallet,
	opts ...whv1.Option,
) *whv1.UnpackedPallet {
	t.Helper()

	reg := warehouse.FromContextT(ctx, t)
	digest, err := p.Digest()
	if err != nil {
		t.Fatalf("failed to compute digest of test pallet %s: %v", p.Name(), err)
	}
	repo := fmt.Sprintf("%s/%s", reg.URL, p.Name())

	return whv1.NewUnpackedPallet(name, append([]whv1.Option{
		whv1.WithArtifact(whv1.NewArtifact(p.Name(), digest.String(), repo)),
		whv1.ForProvider(cluster.Generic),
		whv1.WithRuntime(),
		whv1.WithTimeout(ktest.FromContextT(ctx, t).Tick * 3),
		whv1.WithRetryInterval(ktest.FromContextT(ctx, t).Tick),
	}, opts...)...,
	)
}

// shipment builds a Shipment resource referencing the given pallet artifacts
// from the test registry. Additional opts override the defaults.
func shipment(
	ctx f2.Context,
	t *testing.T,
	name string,
	pallets []whv1.BaseArtifact,
	opts ...whv1.Option,
) *whv1.Shipment {
	t.Helper()

	return whv1.NewShipment(name, append([]whv1.Option{
		whv1.FromRepo(warehouse.FromContextT(ctx, t).URL),
		whv1.WithArtifacts(pallets...),
		whv1.ForProvider(cluster.Generic),
		whv1.WithRuntime(),
		whv1.WithRetryInterval(ktest.FromContextT(ctx, t).Tick),
	}, opts...)...,
	)
}

// createAndPushPallet assembles a single-image pallet from the given layers,
// re-namespaces every object into the test namespace, records the expected
// resource inventory, and pushes the result to the test registry.
func createAndPushPallet(
	ctx f2.Context,
	t *testing.T,
	name, tag string,
	layers ...layer.Layer,
) testPallet {
	t.Helper()

	p := testPallet{name: name, tag: tag}
	var objs []*unstructured.Unstructured

	for i, l := range layers {
		// Transform objects so they are scheduled to test Namespace and avoid
		// collision with other concurrent tests using same source objects
		l, err := layer.Filter(
			l,
			namespace.Filter{Namespace: ktest.FromContextT(ctx, t).Namespace},
		)
		if err != nil {
			t.Fatal("failed to render test pallet manifests", err)
		}
		r, err := l.Uncompressed()
		if err != nil {
			t.Fatal(name, err)
		}
		lobjs, err := object.ReadObjects(r)
		if err != nil {
			t.Fatal("failed to read test pallet objects", name, err)
		}
		objs = append(objs, lobjs...)
		// Replace updated layer
		layers[i] = l
	}

	p.inv = inventory.New(inventory.FromUnstructured(objs...))

	a, err := pallet.Image(pallet.Options{
		Metadata:         pkgMeta(name),
		ClusterProviders: cluster.BuiltInProviders(),
	}, layers...)
	if err != nil {
		t.Fatal("failed to create test pallet", name, err)
	}
	p.pallet = a

	d, err := a.Digest()
	if err != nil {
		t.Fatal(name, err)
	}
	p.digest = d.String()

	// Push so package is available for controller to pull
	pushPkg(ctx, t, a, name, tag)

	return p
}

// createAndPushPalletWithDeps assembles a pallet image index from existing
// artifacts (dependencies) and pushes it to the test registry.
func createAndPushPalletWithDeps(
	ctx f2.Context,
	t *testing.T,
	name, tag string,
	deps ...oci.Artifact,
) testPallet {
	t.Helper()

	p := testPallet{name: name, tag: tag}

	a, err := pallet.ImageIndex(pallet.Options{
		Metadata:         pkgMeta(name),
		ClusterProviders: cluster.BuiltInProviders(),
	}, deps...)
	if err != nil {
		t.Fatal("failed to create test pallet", name, err)
	}
	p.pallet = a

	d, err := a.Digest()
	if err != nil {
		t.Fatal(name, err)
	}
	p.digest = d.String()

	// Push so package is available for controller to pull
	pushPkg(ctx, t, a, name, tag)

	return p
}

// createLayer wraps layer.New with t.Fatal error handling for test fixtures.
func createLayer(
	t *testing.T,
	lt layer.Type, //nolint // will use other layer.Type in future tests
	yaml []byte,
	opts ...layer.Option,
) layer.Layer {
	t.Helper()

	l, err := layer.New(lt, yaml, opts...)
	if err != nil {
		t.Fatal("failed to create test pallet layer", err)
	}

	return l
}

// pushPkg pushes artifact a to the test registry under name:tag, failing the
// test on error.
func pushPkg(ctx f2.Context, t *testing.T, a oci.Artifact, name, tag string) {
	t.Helper()

	if err := warehouse.FromContextT(ctx, t).Push(a, name, tag); err != nil {
		t.Fatal("failed to push test pallet", err)
	}
}

// pkgMeta returns canned pallet metadata for test fixtures named name.
func pkgMeta(name string) pallet.Metadata {
	return pallet.Metadata{
		Name: name,
		Team: "@ncrvoyix-swt-retail/edge-foundation",
		BuildInfo: pallet.BuildInfo{
			Created: "yesterday",
			// NOTE(review): "gothub" looks like a typo, but it is fixture data;
			// left unchanged in case assertions elsewhere depend on it.
			Source:   "https://gothub.com/ncrvoyix-swt-retail/edge-infra",
			Revision: "d34db33f",
			Version:  fixtureSemVer,
		},
	}
}

// fakeL5dLayer returns a runtime layer gated behind the linkerd capability.
func fakeL5dLayer(t *testing.T) layer.Layer {
	return createLayer(t, layer.Runtime, fakeMeshConfig, layer.ForCapability(l5dCap))
}

// fakeInfraLayer returns an infra layer built from the fake-infra fixture.
func fakeInfraLayer(t *testing.T) layer.Layer {
	return createLayer(t, layer.Infra, fakeInfra)
}

// id namespaces a resource name with the current test run ID so concurrent
// runs don't collide.
func id(ctx f2.Context, name string) string {
	return fmt.Sprintf("%s-%s", name, ctx.RunID)
}

// unpackedPalletReady is a cmp comparison that succeeds once the
// UnpackedPallet is Ready, has observed its latest generation, and is up to
// date.
func unpackedPalletReady(o client.Object) cmp.Result {
	u := o.(*whv1.UnpackedPallet)
	if conditions.IsReady(u) && u.Status.ObservedGeneration == u.Generation && u.IsUpToDate() {
		return cmp.ResultSuccess
	}
	return cmp.ResultFailure(fmt.Sprintf("%s not ready", object.FmtObject(u)))
}

// shipmentReady is a cmp comparison that succeeds once the Shipment is Ready
// and up to date.
func shipmentReady(o client.Object) cmp.Result {
	s := o.(*whv1.Shipment)
	if conditions.IsReady(s) && s.IsUpToDate() {
		return cmp.ResultSuccess
	}
	return cmp.ResultFailure(fmt.Sprintf("%s not ready", object.FmtObject(s)))
}

// shipmentStalled is a cmp comparison that succeeds once the Shipment has
// stalled while up to date.
func shipmentStalled(o client.Object) cmp.Result {
	s := o.(*whv1.Shipment)
	if conditions.IsStalled(s) && s.IsUpToDate() {
		return cmp.ResultSuccess
	}
	return cmp.ResultFailure(fmt.Sprintf("%s not stalled", object.FmtObject(s)))
}