package objectrestarter import ( "context" "os" "testing" "time" "github.com/stretchr/testify/suite" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/controller-runtime/pkg/client" "edge-infra.dev/pkg/k8s/runtime/sap" "edge-infra.dev/test/framework" "edge-infra.dev/test/framework/integration" "edge-infra.dev/test/framework/k8s" "edge-infra.dev/test/framework/k8s/envtest" ) func TestMain(m *testing.M) { framework.HandleFlags() os.Exit(m.Run()) } type Suite struct { *framework.Framework *k8s.K8s ctx context.Context timeout time.Duration tick time.Duration workloadName string } // Test setup function which runs the rest of the tests via Suite func TestObjectRestarter(t *testing.T) { testEnv := envtest.Setup(envtest.WithoutCRDs()) k := k8s.New(testEnv.Config) f := framework.New("objectrestarter").Register(k) s := &Suite{ Framework: f, K8s: k, ctx: context.Background(), timeout: k8s.Timeouts.DefaultTimeout, tick: k8s.Timeouts.Tick, workloadName: "redis-master", } suite.Run(t, s) t.Cleanup(func() { f.NoError(testEnv.Stop()) }) } func (s *Suite) TestObjectRestarter_Integration() { integration.SkipIfNot(s.Framework) _, deployment := s.testWorkload() if err := s.Client.Create(s.ctx, deployment); err != nil { s.FailNow("failed to create deployment", err) } s.Log("waiting on deployment to become ready so it can be restarted") // TODO(aw185176): is this correct wrt client options? handle in framework/k8s sweep mgr, err := sap.NewResourceManagerFromConfig( s.RESTConfig(), client.Options{}, k8s.FieldManagerOwner(s.Framework), ) if err != nil { s.FailNow("failed to create resource manager", err) } err = mgr.WaitForSet(s.ctx, []object.ObjMetadata{ { Name: deployment.Name, Namespace: deployment.Namespace, // for some reason deployment.GroupVersionKind().GroupKind() doesn't work GroupKind: schema.GroupKind{ Group: "apps", Kind: "Deployment", }, }, }, sap.WaitOptions{Timeout: s.timeout}) s.NoError(err, "deployment never became ready") s.restart(deployment) s.checkAnnotation(deployment) // this is actually quite paranoid as we are verifying that the built-in K8s // mechanism works, but it is useful to confirm that we are using the // mechanism correctly both now and over time as K8s itself changes. s.Log("waiting for deployment to restart") d := &appsv1.Deployment{} s.Eventually(func() bool { _ = s.Client.Get(s.ctx, client.ObjectKeyFromObject(deployment), d) return deployCondition( d.Status.Conditions, appsv1.DeploymentProgressing, corev1.ConditionTrue, ) }, s.timeout, s.tick, "deployment never began progressing again") } func (s *Suite) TestObjectRestarter_Unit() { // we don't want to run this for integration tests because it does the same // thing and we don't want to create additional namespaces during integration // tests integration.SkipIf(s.Framework) namespace, deployment := s.testWorkload() if err := s.Client.Create(s.ctx, namespace); err != nil { s.FailNow("failed to create namespace", err) } if err := s.Client.Create(s.ctx, deployment); err != nil { s.FailNow("failed to create deployment", err) } s.restart(deployment) s.checkAnnotation(deployment) } func (s *Suite) TestIsObjectRestartable() { s.True(IsObjectRestartable(&appsv1.Deployment{})) s.True(IsObjectRestartable(&appsv1.StatefulSet{})) s.True(IsObjectRestartable(&appsv1.DaemonSet{})) workload := &unstructured.Unstructured{} workload.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment")) s.True(IsObjectRestartable(workload)) } // // test helpers // func (s *Suite) checkAnnotation(deployment *appsv1.Deployment) { s.Log("fetching deployment to confirm it was changed") d := &appsv1.Deployment{} s.NoError(s.Client.Get(s.ctx, client.ObjectKeyFromObject(deployment), d)) s.True( v1.HasAnnotation(d.Spec.Template.ObjectMeta, restartAnnotation), "deployment was not annotated", ) } func (s *Suite) restart(deployment *appsv1.Deployment) { s.Log("patching deployment with restart annotation") s.NoError(Restart(s.ctx, s.Client, deployment), "failed to patch deployment") } func (s *Suite) testWorkload() (namespace *corev1.Namespace, deployment *appsv1.Deployment) { n := &corev1.Namespace{ ObjectMeta: v1.ObjectMeta{ Name: "obj-restarter-rest-deployment", }, } d := &appsv1.Deployment{ ObjectMeta: v1.ObjectMeta{ Name: "redis-master", }, Spec: appsv1.DeploymentSpec{ Selector: &v1.LabelSelector{ MatchLabels: map[string]string{ "app": "redis", }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ Labels: map[string]string{ "app": "redis", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { Name: "master", Image: "redis:6", }, }, }, }, }, } if integration.IsIntegrationTest() { d.Namespace = s.Namespace } else { d.Namespace = n.Name } return n, d } // this is janky as hell because i couldn't figure out how to get the kstatus // generic status readers stuff working func deployCondition(cc []appsv1.DeploymentCondition, t appsv1.DeploymentConditionType, s corev1.ConditionStatus) bool { for _, c := range cc { if c.Type == t && c.Status == s { return true } } return false }