
Source file src/github.com/onsi/ginkgo/v2/internal/suite.go

Documentation: github.com/onsi/ginkgo/v2/internal

     1  package internal
     2  
     3  import (
     4  	"fmt"
     5  	"sync"
     6  	"time"
     7  
     8  	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
     9  	"github.com/onsi/ginkgo/v2/internal/parallel_support"
    10  	"github.com/onsi/ginkgo/v2/reporters"
    11  	"github.com/onsi/ginkgo/v2/types"
    12  	"golang.org/x/net/context"
    13  )
    14  
    15  type Phase uint
    16  
    17  const (
    18  	PhaseBuildTopLevel Phase = iota
    19  	PhaseBuildTree
    20  	PhaseRun
    21  )
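
        // The phases advance strictly forward: nodes pushed while a test file's top-level
        // code runs are collected during PhaseBuildTopLevel, BuildTree() enters them during
        // PhaseBuildTree, and Run() moves the suite into PhaseRun.  PushNode, pushSuiteNode,
        // and pushCleanupNode below all branch on the current phase.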
    22  
    23  var PROGRESS_REPORTER_DEADLING = 5 * time.Second
    24  
    25  type Suite struct {
    26  	tree               *TreeNode
    27  	topLevelContainers Nodes
    28  
    29  	*ProgressReporterManager
    30  
    31  	phase Phase
    32  
    33  	suiteNodes   Nodes
    34  	cleanupNodes Nodes
    35  
    36  	failer            *Failer
    37  	reporter          reporters.Reporter
    38  	writer            WriterInterface
    39  	outputInterceptor OutputInterceptor
    40  	interruptHandler  interrupt_handler.InterruptHandlerInterface
    41  	config            types.SuiteConfig
    42  	deadline          time.Time
    43  
    44  	skipAll              bool
    45  	report               types.Report
    46  	currentSpecReport    types.SpecReport
    47  	currentNode          Node
    48  	currentNodeStartTime time.Time
    49  
    50  	currentSpecContext *specContext
    51  
    52  	currentByStep types.SpecEvent
    53  	timelineOrder int
    54  
    55  	/*
    56  		We don't need to lock around all operations.  Just those that *could* happen concurrently.
    57  
    58  	Suite, generally, only runs one node at a time - and so the possibility for races is small.  In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
    59  
    60  		However, there are some operations that can happen concurrently:
    61  
    62  		- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020).  They both form a self-contained read-write pair and so a lock in them is sufficient.
    63  		- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll.  Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor.  To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*.  In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock).  Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
    64  	*/
    65  	selectiveLock *sync.Mutex
    66  
    67  	client parallel_support.Client
    68  }
    69  
    70  func NewSuite() *Suite {
    71  	return &Suite{
    72  		tree:                    &TreeNode{},
    73  		phase:                   PhaseBuildTopLevel,
    74  		ProgressReporterManager: NewProgressReporterManager(),
    75  
    76  		selectiveLock: &sync.Mutex{},
    77  	}
    78  }
    79  
    80  func (suite *Suite) Clone() (*Suite, error) {
    81  	if suite.phase != PhaseBuildTopLevel {
    82  		return nil, fmt.Errorf("cannot clone suite after tree has been built")
    83  	}
    84  	return &Suite{
    85  		tree:                    &TreeNode{},
    86  		phase:                   PhaseBuildTopLevel,
    87  		ProgressReporterManager: NewProgressReporterManager(),
    88  		topLevelContainers:      suite.topLevelContainers.Clone(),
    89  		suiteNodes:              suite.suiteNodes.Clone(),
    90  		selectiveLock:           &sync.Mutex{},
    91  	}, nil
    92  }
    93  
    94  func (suite *Suite) BuildTree() error {
    95  	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers without being entered
    96  	// We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
    97  	suite.phase = PhaseBuildTree
    98  	for _, topLevelContainer := range suite.topLevelContainers {
    99  		err := suite.PushNode(topLevelContainer)
   100  		if err != nil {
   101  			return err
   102  		}
   103  	}
   104  	return nil
   105  }
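
        // Sketch (not part of this file): the exported RunSpecs entry point, typically called
        // from the TestX function that `ginkgo bootstrap` generates, drives a Suite roughly
        // like this; the names below are illustrative, not exact:
        //
        //	suite := NewSuite()
        //	// DSL calls (Describe, It, ...) translate into suite.PushNode(...) while the
        //	// test file's top-level code runs, i.e. during PhaseBuildTopLevel.
        //	if err := suite.BuildTree(); err != nil {
        //		// a container body panicked or pushed an invalid node
        //	}
        //	passed, hasFocus := suite.Run(description, labels, path, failer, reporter,
        //		writer, interceptor, interruptHandler, client, registrar, conf)
        //	_, _ = passed, hasFocus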
   106  
   107  func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
   108  	if suite.phase != PhaseBuildTree {
   109  		panic("cannot run before building the tree - call suite.BuildTree() first")
   110  	}
   111  	ApplyNestedFocusPolicyToTree(suite.tree)
   112  	specs := GenerateSpecsFromTreeRoot(suite.tree)
   113  	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
   114  
   115  	suite.phase = PhaseRun
   116  	suite.client = client
   117  	suite.failer = failer
   118  	suite.reporter = reporter
   119  	suite.writer = writer
   120  	suite.outputInterceptor = outputInterceptor
   121  	suite.interruptHandler = interruptHandler
   122  	suite.config = suiteConfig
   123  
   124  	if suite.config.Timeout > 0 {
   125  		suite.deadline = time.Now().Add(suite.config.Timeout)
   126  	}
   127  
   128  	cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
   129  
   130  	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
   131  
   132  	cancelProgressHandler()
   133  
   134  	return success, hasProgrammaticFocus
   135  }
   136  
   137  func (suite *Suite) InRunPhase() bool {
   138  	return suite.phase == PhaseRun
   139  }
   140  
   141  /*
   142    Tree Construction methods
   143  
   144    PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
   145  */
   146  
   147  func (suite *Suite) PushNode(node Node) error {
   148  	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
   149  		return suite.pushCleanupNode(node)
   150  	}
   151  
   152  	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
   153  		return suite.pushSuiteNode(node)
   154  	}
   155  
   156  	if suite.phase == PhaseRun {
   157  		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
   158  	}
   159  
   160  	if node.MarkedSerial {
   161  		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
   162  		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
   163  			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
   164  		}
   165  	}
   166  
   167  	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
   168  		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
   169  		if firstOrderedNode.IsZero() {
   170  			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
   171  		}
   172  	}
   173  
   174  	if node.MarkedContinueOnFailure {
   175  		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
   176  		if !firstOrderedNode.IsZero() {
   177  			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
   178  		}
   179  	}
   180  
   181  	if node.NodeType == types.NodeTypeContainer {
   182  		// During PhaseBuildTopLevel we only track the top level containers without entering them
   183  		// We only enter the top level container nodes during PhaseBuildTree
   184  		//
   185  		// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
   186  		// the user an opportunity to load suiteConfiguration information in the `TestX` go test hook just before `RunSpecs`
   187  		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
   188  		if suite.phase == PhaseBuildTopLevel {
   189  			suite.topLevelContainers = append(suite.topLevelContainers, node)
   190  			return nil
   191  		}
   192  		if suite.phase == PhaseBuildTree {
   193  			parentTree := suite.tree
   194  			suite.tree = &TreeNode{Node: node}
   195  			parentTree.AppendChild(suite.tree)
   196  			err := func() (err error) {
   197  				defer func() {
   198  					if e := recover(); e != nil {
   199  						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
   200  					}
   201  				}()
   202  				node.Body(nil)
   203  				return err
   204  			}()
   205  			suite.tree = parentTree
   206  			return err
   207  		}
   208  	} else {
   209  		suite.tree.AppendChild(&TreeNode{Node: node})
   210  		return nil
   211  	}
   212  
   213  	return nil
   214  }
   215  
   216  func (suite *Suite) pushSuiteNode(node Node) error {
   217  	if suite.phase == PhaseBuildTree {
   218  		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
   219  	}
   220  
   221  	if suite.phase == PhaseRun {
   222  		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
   223  	}
   224  
   225  	switch node.NodeType {
   226  	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
   227  		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
   228  		if len(existingBefores) > 0 {
   229  			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
   230  		}
   231  	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
   232  		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
   233  		if len(existingAfters) > 0 {
   234  			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
   235  		}
   236  	}
   237  
   238  	suite.suiteNodes = append(suite.suiteNodes, node)
   239  	return nil
   240  }
   241  
   242  func (suite *Suite) pushCleanupNode(node Node) error {
   243  	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
   244  		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
   245  	}
   246  
   247  	switch suite.currentNode.NodeType {
   248  	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
   249  		node.NodeType = types.NodeTypeCleanupAfterSuite
   250  	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
   251  		node.NodeType = types.NodeTypeCleanupAfterAll
   252  	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
   253  		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
   254  	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
   255  		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
   256  	default:
   257  		node.NodeType = types.NodeTypeCleanupAfterEach
   258  	}
   259  
   260  	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
   261  	node.NestingLevel = suite.currentNode.NestingLevel
   262  	suite.selectiveLock.Lock()
   263  	suite.cleanupNodes = append(suite.cleanupNodes, node)
   264  	suite.selectiveLock.Unlock()
   265  
   266  	return nil
   267  }
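
        // Cleanup nodes (e.g. those registered via DeferCleanup) inherit their scope from the
        // node that registered them: inside a BeforeSuite/AfterSuite variant they become
        // NodeTypeCleanupAfterSuite, inside a BeforeAll/AfterAll they become
        // NodeTypeCleanupAfterAll, and anywhere else they default to NodeTypeCleanupAfterEach.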
   268  
   269  func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
   270  	suite.selectiveLock.Lock()
   271  	defer suite.selectiveLock.Unlock()
   272  
   273  	suite.timelineOrder += 1
   274  	return types.TimelineLocation{
   275  		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
   276  		Order:  suite.timelineOrder,
   277  		Time:   time.Now(),
   278  	}
   279  }
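
        // The Offset computed above is a byte position in the spec's GinkgoWriter stream:
        // everything already captured into the report plus whatever is still sitting in the
        // live writer buffer.  That is presumably what lets reporters anchor timeline events
        // (By steps, report entries, failures) at the right point in the captured output.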
   280  
   281  func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
   282  	event.TimelineLocation = suite.generateTimelineLocation()
   283  	suite.selectiveLock.Lock()
   284  	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
   285  	suite.selectiveLock.Unlock()
   286  	suite.reporter.EmitSpecEvent(event)
   287  	return event
   288  }
   289  
   290  func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
   291  	event := startEvent
   292  	event.SpecEventType = eventType
   293  	event.TimelineLocation = suite.generateTimelineLocation()
   294  	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
   295  	suite.selectiveLock.Lock()
   296  	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
   297  	suite.selectiveLock.Unlock()
   298  	suite.reporter.EmitSpecEvent(event)
   299  }
   300  
   301  func (suite *Suite) By(text string, callback ...func()) error {
   302  	cl := types.NewCodeLocation(2)
   303  	if suite.phase != PhaseRun {
   304  		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
   305  	}
   306  
   307  	event := suite.handleSpecEvent(types.SpecEvent{
   308  		SpecEventType: types.SpecEventByStart,
   309  		CodeLocation:  cl,
   310  		Message:       text,
   311  	})
   312  	suite.selectiveLock.Lock()
   313  	suite.currentByStep = event
   314  	suite.selectiveLock.Unlock()
   315  
   316  	if len(callback) == 1 {
   317  		defer func() {
   318  			suite.selectiveLock.Lock()
   319  			suite.currentByStep = types.SpecEvent{}
   320  			suite.selectiveLock.Unlock()
   321  			suite.handleSpecEventEnd(types.SpecEventByEnd, event)
   322  		}()
   323  		callback[0]()
   324  	} else if len(callback) > 1 {
   325  		panic("just one callback per By, please")
   326  	}
   327  	return nil
   328  }
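
        // Sketch (not part of this file) of the user-facing calls that land here; the exported
        // ginkgo.By shares this signature and ultimately delegates to the running suite:
        //
        //	By("fetching the token")              // records a ByStart event only
        //	By("fetching the token", func() {     // records ByStart, runs the callback,
        //		// ... user code ...              // then records ByEnd with the elapsed duration
        //	})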
   329  
   330  /*
   331  Spec Running methods - used during PhaseRun
   332  */
   333  func (suite *Suite) CurrentSpecReport() types.SpecReport {
   334  	suite.selectiveLock.Lock()
   335  	defer suite.selectiveLock.Unlock()
   336  	report := suite.currentSpecReport
   337  	if suite.writer != nil {
   338  		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   339  	}
   340  	report.ReportEntries = make([]ReportEntry, len(report.ReportEntries))
   341  	copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
   342  	return report
   343  }
   344  
   345  // Only valid in the preview context.  In general suite.report only includes
   346  // the specs run by _this_ process - it is only at the end of the suite that
   347  // the parallel reports are aggregated.  However in the preview context we run
   348  // in series, so suite.report already covers the full set of specs.
   349  func (suite *Suite) GetPreviewReport() types.Report {
   350  	suite.selectiveLock.Lock()
   351  	defer suite.selectiveLock.Unlock()
   352  	return suite.report
   353  }
   354  
   355  func (suite *Suite) AddReportEntry(entry ReportEntry) error {
   356  	if suite.phase != PhaseRun {
   357  		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
   358  	}
   359  	entry.TimelineLocation = suite.generateTimelineLocation()
   360  	entry.Time = entry.TimelineLocation.Time
   361  	suite.selectiveLock.Lock()
   362  	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
   363  	suite.selectiveLock.Unlock()
   364  	suite.reporter.EmitReportEntry(entry)
   365  	return nil
   366  }
   367  
   368  func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
   369  	timelineLocation := suite.generateTimelineLocation()
   370  	suite.selectiveLock.Lock()
   371  	defer suite.selectiveLock.Unlock()
   372  
   373  	deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
   374  	defer cancel()
   375  	var additionalReports []string
   376  	if suite.currentSpecContext != nil {
   377  		additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
   378  	}
   379  	additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
   380  	gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
   381  	pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
   382  
   383  	if err != nil {
   384  		fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
   385  	}
   386  	return pr
   387  }
   388  
   389  func (suite *Suite) handleProgressSignal() {
   390  	report := suite.generateProgressReport(false)
   391  	report.Message = "{{bold}}You've requested a progress report:{{/}}"
   392  	suite.emitProgressReport(report)
   393  }
   394  
   395  func (suite *Suite) emitProgressReport(report types.ProgressReport) {
   396  	suite.selectiveLock.Lock()
   397  	suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
   398  	suite.selectiveLock.Unlock()
   399  
   400  	suite.reporter.EmitProgressReport(report)
   401  	if suite.isRunningInParallel() {
   402  		err := suite.client.PostEmitProgressReport(report)
   403  		if err != nil {
   404  			fmt.Println(err.Error())
   405  		}
   406  	}
   407  }
   408  
   409  func (suite *Suite) isRunningInParallel() bool {
   410  	return suite.config.ParallelTotal > 1
   411  }
   412  
   413  func (suite *Suite) processCurrentSpecReport() {
   414  	suite.reporter.DidRun(suite.currentSpecReport)
   415  	if suite.isRunningInParallel() {
   416  		suite.client.PostDidRun(suite.currentSpecReport)
   417  	}
   418  	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
   419  
   420  	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   421  		suite.report.SuiteSucceeded = false
   422  		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
   423  			suite.skipAll = true
   424  			if suite.isRunningInParallel() {
   425  				suite.client.PostAbort()
   426  			}
   427  		}
   428  	}
   429  }
   430  
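        // runSpecs is the top-level run loop: it announces SuiteWillBegin, runs any
        // ReportBeforeSuite and BeforeSuite nodes, then repeatedly fetches the next group of
        // spec indices (from a local counter, or from the parallel-support client when running
        // in parallel) and hands each group to newGroup(suite).run.  When running in parallel,
        // Serial specs are deferred until the nonprimary procs have finished and only run on
        // proc 1.  Afterwards it runs AfterSuite/cleanup and ReportAfterSuite nodes and settles
        // SuiteSucceeded.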
   431  func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
   432  	numSpecsThatWillBeRun := specs.CountWithoutSkip()
   433  
   434  	suite.report = types.Report{
   435  		SuitePath:                 suitePath,
   436  		SuiteDescription:          description,
   437  		SuiteLabels:               suiteLabels,
   438  		SuiteConfig:               suite.config,
   439  		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
   440  		PreRunStats: types.PreRunStats{
   441  			TotalSpecs:       len(specs),
   442  			SpecsThatWillRun: numSpecsThatWillBeRun,
   443  		},
   444  		StartTime: time.Now(),
   445  	}
   446  
   447  	suite.reporter.SuiteWillBegin(suite.report)
   448  	if suite.isRunningInParallel() {
   449  		suite.client.PostSuiteWillBegin(suite.report)
   450  	}
   451  
   452  	suite.report.SuiteSucceeded = true
   453  
   454  	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
   455  
   456  	ranBeforeSuite := suite.report.SuiteSucceeded
   457  	if suite.report.SuiteSucceeded {
   458  		suite.runBeforeSuite(numSpecsThatWillBeRun)
   459  	}
   460  
   461  	if suite.report.SuiteSucceeded {
   462  		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
   463  		nextIndex := MakeIncrementingIndexCounter()
   464  		if suite.isRunningInParallel() {
   465  			nextIndex = suite.client.FetchNextCounter
   466  		}
   467  
   468  		for {
   469  			groupedSpecIdx, err := nextIndex()
   470  			if err != nil {
   471  				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
   472  				suite.report.SuiteSucceeded = false
   473  				break
   474  			}
   475  
   476  			if groupedSpecIdx >= len(groupedSpecIndices) {
   477  				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
   478  					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
   479  					suite.client.BlockUntilNonprimaryProcsHaveFinished()
   480  					continue
   481  				}
   482  				break
   483  			}
   484  
   485  			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
   486  			// we encapsulate that complexity in the notion of a Group that can run
   487  			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
   488  			// Note that group is stateful and intended for single use!
   489  			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
   490  		}
   491  
   492  		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
   493  			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
   494  			suite.report.SuiteSucceeded = false
   495  		}
   496  	}
   497  
   498  	if ranBeforeSuite {
   499  		suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
   500  	}
   501  
   502  	interruptStatus := suite.interruptHandler.Status()
   503  	if interruptStatus.Interrupted() {
   504  		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
   505  		suite.report.SuiteSucceeded = false
   506  	}
   507  	suite.report.EndTime = time.Now()
   508  	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
   509  	if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
   510  		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
   511  		suite.report.SuiteSucceeded = false
   512  	}
   513  
   514  	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
   515  	suite.reporter.SuiteDidEnd(suite.report)
   516  	if suite.isRunningInParallel() {
   517  		suite.client.PostSuiteDidEnd(suite.report)
   518  	}
   519  
   520  	return suite.report.SuiteSucceeded
   521  }
   522  
   523  func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
   524  	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
   525  	if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
   526  		suite.selectiveLock.Lock()
   527  		suite.currentSpecReport = types.SpecReport{
   528  			LeafNodeType:      beforeSuiteNode.NodeType,
   529  			LeafNodeLocation:  beforeSuiteNode.CodeLocation,
   530  			ParallelProcess:   suite.config.ParallelProcess,
   531  			RunningInParallel: suite.isRunningInParallel(),
   532  		}
   533  		suite.selectiveLock.Unlock()
   534  
   535  		suite.reporter.WillRun(suite.currentSpecReport)
   536  		suite.runSuiteNode(beforeSuiteNode)
   537  		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
   538  			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
   539  			suite.skipAll = true
   540  		}
   541  		suite.processCurrentSpecReport()
   542  	}
   543  }
   544  
   545  func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
   546  	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
   547  	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
   548  		suite.selectiveLock.Lock()
   549  		suite.currentSpecReport = types.SpecReport{
   550  			LeafNodeType:      afterSuiteNode.NodeType,
   551  			LeafNodeLocation:  afterSuiteNode.CodeLocation,
   552  			ParallelProcess:   suite.config.ParallelProcess,
   553  			RunningInParallel: suite.isRunningInParallel(),
   554  		}
   555  		suite.selectiveLock.Unlock()
   556  
   557  		suite.reporter.WillRun(suite.currentSpecReport)
   558  		suite.runSuiteNode(afterSuiteNode)
   559  		suite.processCurrentSpecReport()
   560  	}
   561  
   562  	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
   563  	if len(afterSuiteCleanup) > 0 {
   564  		for _, cleanupNode := range afterSuiteCleanup {
   565  			suite.selectiveLock.Lock()
   566  			suite.currentSpecReport = types.SpecReport{
   567  				LeafNodeType:      cleanupNode.NodeType,
   568  				LeafNodeLocation:  cleanupNode.CodeLocation,
   569  				ParallelProcess:   suite.config.ParallelProcess,
   570  				RunningInParallel: suite.isRunningInParallel(),
   571  			}
   572  			suite.selectiveLock.Unlock()
   573  
   574  			suite.reporter.WillRun(suite.currentSpecReport)
   575  			suite.runSuiteNode(cleanupNode)
   576  			suite.processCurrentSpecReport()
   577  		}
   578  	}
   579  }
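
        // AfterSuite cleanup nodes (DeferCleanup calls made in suite-level setup) run
        // last-in-first-out, hence the Reverse() above, and each one is reported as its own
        // SpecReport.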
   580  
   581  func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
   582  	nodes := spec.Nodes.WithType(nodeType)
   583  	if nodeType == types.NodeTypeReportAfterEach {
   584  		nodes = nodes.SortedByDescendingNestingLevel()
   585  	}
   586  	if nodeType == types.NodeTypeReportBeforeEach {
   587  		nodes = nodes.SortedByAscendingNestingLevel()
   588  	}
   589  	if len(nodes) == 0 {
   590  		return
   591  	}
   592  
   593  	for i := range nodes {
   594  		suite.writer.Truncate()
   595  		suite.outputInterceptor.StartInterceptingOutput()
   596  		report := suite.currentSpecReport
   597  		nodes[i].Body = func(ctx SpecContext) {
   598  			nodes[i].ReportEachBody(ctx, report)
   599  		}
   600  		state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
   601  
   602  		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
   603  		// Also, if the reporter is ever aborted, always override the state so the abort propagates
   604  		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
   605  			suite.currentSpecReport.State = state
   606  			suite.currentSpecReport.Failure = failure
   607  		}
   608  		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
   609  		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   610  	}
   611  }
   612  
   613  func (suite *Suite) runSuiteNode(node Node) {
   614  	if suite.config.DryRun {
   615  		suite.currentSpecReport.State = types.SpecStatePassed
   616  		return
   617  	}
   618  
   619  	suite.writer.Truncate()
   620  	suite.outputInterceptor.StartInterceptingOutput()
   621  	suite.currentSpecReport.StartTime = time.Now()
   622  
   623  	var err error
   624  	switch node.NodeType {
   625  	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
   626  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   627  	case types.NodeTypeCleanupAfterSuite:
   628  		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
   629  			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
   630  		}
   631  		if err == nil {
   632  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   633  		}
   634  	case types.NodeTypeSynchronizedBeforeSuite:
   635  		var data []byte
   636  		var runAllProcs bool
   637  		if suite.config.ParallelProcess == 1 {
   638  			if suite.config.ParallelTotal > 1 {
   639  				suite.outputInterceptor.StopInterceptingAndReturnOutput()
   640  				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
   641  			}
   642  			node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
   643  			node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
   644  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   645  			if suite.config.ParallelTotal > 1 {
   646  				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   647  				suite.outputInterceptor.StartInterceptingOutput()
   648  				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
   649  					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
   650  				} else {
   651  					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
   652  				}
   653  			}
   654  			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
   655  		} else {
   656  			var proc1State types.SpecState
   657  			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
   658  			switch proc1State {
   659  			case types.SpecStatePassed:
   660  				runAllProcs = true
   661  			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
   662  				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
   663  			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
   664  				suite.currentSpecReport.State = proc1State
   665  			}
   666  		}
   667  		if runAllProcs {
   668  			node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
   669  			node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
   670  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   671  		}
   672  	case types.NodeTypeSynchronizedAfterSuite:
   673  		node.Body = node.SynchronizedAfterSuiteAllProcsBody
   674  		node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
   675  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   676  		if suite.config.ParallelProcess == 1 {
   677  			if suite.config.ParallelTotal > 1 {
   678  				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
   679  			}
   680  			if err == nil {
   681  				if suite.config.ParallelTotal > 1 {
   682  					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   683  					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
   684  				}
   685  
   686  				node.Body = node.SynchronizedAfterSuiteProc1Body
   687  				node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
   688  				state, failure := suite.runNode(node, time.Time{}, "")
   689  				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
   690  					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
   691  				}
   692  			}
   693  		}
   694  	}
   695  
   696  	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   697  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
   698  		suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
   699  	}
   700  
   701  	suite.currentSpecReport.EndTime = time.Now()
   702  	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
   703  	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   704  	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   705  }
   706  
   707  func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
   708  	nodes := suite.suiteNodes.WithType(nodeType)
   709  	// only run ReportAfterSuite on proc 1
   710  	if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
   711  		return
   712  	}
   713  	// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
   714  	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
   715  		state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
   716  		if err != nil || state.Is(types.SpecStateFailed) {
   717  			suite.report.SuiteSucceeded = false
   718  		}
   719  		return
   720  	}
   721  
   722  	for _, node := range nodes {
   723  		suite.selectiveLock.Lock()
   724  		suite.currentSpecReport = types.SpecReport{
   725  			LeafNodeType:      node.NodeType,
   726  			LeafNodeLocation:  node.CodeLocation,
   727  			LeafNodeText:      node.Text,
   728  			ParallelProcess:   suite.config.ParallelProcess,
   729  			RunningInParallel: suite.isRunningInParallel(),
   730  		}
   731  		suite.selectiveLock.Unlock()
   732  
   733  		suite.reporter.WillRun(suite.currentSpecReport)
   734  		suite.runReportSuiteNode(node, suite.report)
   735  		suite.processCurrentSpecReport()
   736  	}
   737  
   738  	// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
   739  	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
   740  		if suite.report.SuiteSucceeded {
   741  			suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
   742  		} else {
   743  			suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
   744  		}
   745  	}
   746  }
   747  
   748  func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
   749  	suite.writer.Truncate()
   750  	suite.outputInterceptor.StartInterceptingOutput()
   751  	suite.currentSpecReport.StartTime = time.Now()
   752  
   753  	// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
   754  	// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
   755  	if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
   756  		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
   757  		if err != nil {
   758  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
   759  			suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
   760  			return
   761  		}
   762  		report = report.Add(aggregatedReport)
   763  	}
   764  
   765  	node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
   766  	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
   767  
   768  	suite.currentSpecReport.EndTime = time.Now()
   769  	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
   770  	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   771  	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
   772  }
   773  
   774  func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
   775  	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
   776  		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
   777  	}
   778  
   779  	interruptStatus := suite.interruptHandler.Status()
   780  	if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
   781  		return types.SpecStateSkipped, types.Failure{}
   782  	}
   783  	if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
   784  		return types.SpecStateSkipped, types.Failure{}
   785  	}
   786  	if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
   787  		return types.SpecStateSkipped, types.Failure{}
   788  	}
   789  
   790  	suite.selectiveLock.Lock()
   791  	suite.currentNode = node
   792  	suite.currentNodeStartTime = time.Now()
   793  	suite.currentByStep = types.SpecEvent{}
   794  	suite.selectiveLock.Unlock()
   795  	defer func() {
   796  		suite.selectiveLock.Lock()
   797  		suite.currentNode = Node{}
   798  		suite.currentNodeStartTime = time.Time{}
   799  		suite.selectiveLock.Unlock()
   800  	}()
   801  
   802  	if text == "" {
   803  		text = "TOP-LEVEL"
   804  	}
   805  	event := suite.handleSpecEvent(types.SpecEvent{
   806  		SpecEventType: types.SpecEventNodeStart,
   807  		NodeType:      node.NodeType,
   808  		Message:       text,
   809  		CodeLocation:  node.CodeLocation,
   810  	})
   811  	defer func() {
   812  		suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
   813  	}()
   814  
   815  	var failure types.Failure
   816  	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
   817  	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
   818  		failure.FailureNodeContext = types.FailureNodeIsLeafNode
   819  	} else if node.NestingLevel <= 0 {
   820  		failure.FailureNodeContext = types.FailureNodeAtTopLevel
   821  	} else {
   822  		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
   823  	}
   824  	var outcome types.SpecState
   825  
   826  	gracePeriod := suite.config.GracePeriod
   827  	if node.GracePeriod >= 0 {
   828  		gracePeriod = node.GracePeriod
   829  	}
   830  
   831  	now := time.Now()
   832  	deadline := suite.deadline
   833  	timeoutInPlay := "suite"
   834  	if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
   835  		deadline = specDeadline
   836  		timeoutInPlay = "spec"
   837  	}
   838  	if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
   839  		deadline = now.Add(node.NodeTimeout)
   840  		timeoutInPlay = "node"
   841  	}
   842  	if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
   843  		// we're out of time already.  let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
   844  		if node.NodeTimeout > 0 {
   845  			deadline = now.Add(node.NodeTimeout)
   846  			timeoutInPlay = "node"
   847  		} else {
   848  			deadline = now.Add(gracePeriod)
   849  			timeoutInPlay = "grace period"
   850  		}
   851  	}
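
        	// At this point deadline is the earliest applicable limit: the suite-level timeout,
        	// the spec's deadline (specDeadline), or this node's NodeTimeout, with timeoutInPlay
        	// recording which one won.  If time has already run out, or an interrupt is pending,
        	// the node is still started but only given its NodeTimeout (or, failing that, the
        	// GracePeriod) before it is timed out.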
   852  
   853  	if !node.HasContext {
   854  		// this maps onto the pre-context behavior:
   855  		// - an interrupted node exits immediately.  with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
   856  		// - cleanup nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
   857  		gracePeriod = 0
   858  	}
   859  
   860  	sc := NewSpecContext(suite)
   861  	defer sc.cancel(fmt.Errorf("spec has finished"))
   862  
   863  	suite.selectiveLock.Lock()
   864  	suite.currentSpecContext = sc
   865  	suite.selectiveLock.Unlock()
   866  
   867  	var deadlineChannel <-chan time.Time
   868  	if !deadline.IsZero() {
   869  		deadlineChannel = time.After(deadline.Sub(now))
   870  	}
   871  	var gracePeriodChannel <-chan time.Time
   872  
   873  	outcomeC := make(chan types.SpecState)
   874  	failureC := make(chan types.Failure)
   875  
   876  	go func() {
   877  		finished := false
   878  		defer func() {
   879  			if e := recover(); e != nil || !finished {
   880  				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
   881  			}
   882  
   883  			outcomeFromRun, failureFromRun := suite.failer.Drain()
   884  			failureFromRun.TimelineLocation = suite.generateTimelineLocation()
   885  			outcomeC <- outcomeFromRun
   886  			failureC <- failureFromRun
   887  		}()
   888  
   889  		node.Body(sc)
   890  		finished = true
   891  	}()
   892  
   893  	// progress polling timer and channel
   894  	var emitProgressNow <-chan time.Time
   895  	var progressPoller *time.Timer
   896  	var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
   897  	if node.PollProgressAfter >= 0 {
   898  		pollProgressAfter = node.PollProgressAfter
   899  	}
   900  	if node.PollProgressInterval >= 0 {
   901  		pollProgressInterval = node.PollProgressInterval
   902  	}
   903  	if pollProgressAfter > 0 {
   904  		progressPoller = time.NewTimer(pollProgressAfter)
   905  		emitProgressNow = progressPoller.C
   906  		defer progressPoller.Stop()
   907  	}
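
        	// A node-level PollProgressAfter/PollProgressInterval (any value >= 0) overrides the
        	// suite-wide configuration; if the resulting pollProgressAfter is zero no automatic
        	// progress reports are emitted for this node, and a zero interval makes the report a
        	// one-shot (see the Reset call in the select loop below).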
   908  
   909  	// now we wait for an outcome, an interrupt, a timeout, or a progress poll
   910  	for {
   911  		select {
   912  		case outcomeFromRun := <-outcomeC:
   913  			failureFromRun := <-failureC
   914  			if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
   915  				// we've already been interrupted/timed out.  we just managed to actually exit
   916  				// before the grace period elapsed
   917  				// if we have a failure message we attach it as an additional failure
   918  				if outcomeFromRun != types.SpecStatePassed {
   919  					additionalFailure := types.AdditionalFailure{
   920  						State:   outcomeFromRun,
   921  						Failure: failure, // we make a copy - this will include all the configuration set up above...
   922  					}
   923  					// ...and then we update the failure with the details from failureFromRun
   924  					additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
   925  					additionalFailure.Failure.ProgressReport = types.ProgressReport{}
   926  					if outcome == types.SpecStateTimedout {
   927  						additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
   928  					} else {
   929  						additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
   930  					}
   931  					suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
   932  					failure.AdditionalFailure = &additionalFailure
   933  				}
   934  				return outcome, failure
   935  			}
   936  			if outcomeFromRun.Is(types.SpecStatePassed) {
   937  				return outcomeFromRun, types.Failure{}
   938  			} else {
   939  				failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
   940  				suite.reporter.EmitFailure(outcomeFromRun, failure)
   941  				return outcomeFromRun, failure
   942  			}
   943  		case <-gracePeriodChannel:
   944  			if node.HasContext && outcome.Is(types.SpecStateTimedout) {
   945  				report := suite.generateProgressReport(false)
   946  				report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed.  The node has now leaked and is running in the background.\nHere's a current progress report:"
   947  				suite.emitProgressReport(report)
   948  			}
   949  			return outcome, failure
   950  		case <-deadlineChannel:
   951  			// we're out of time - the outcome is a timeout and we capture the failure and progress report
   952  			outcome = types.SpecStateTimedout
   953  			failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
   954  			failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
   955  			failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
   956  			deadlineChannel = nil
   957  			suite.reporter.EmitFailure(outcome, failure)
   958  
   959  			// tell the spec to stop.  it's important we generate the progress report first to make sure we capture where
   960  			// the spec is actually stuck
   961  			sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
   962  			// and now we wait for the grace period
   963  			gracePeriodChannel = time.After(gracePeriod)
   964  		case <-interruptStatus.Channel:
   965  			interruptStatus = suite.interruptHandler.Status()
   966  			// ignore interruptions from other processes if we are cleaning up or reporting
   967  			if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
   968  				node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
   969  				continue
   970  			}
   971  
   972  			deadlineChannel = nil // don't worry about deadlines, time's up now
   973  
   974  			failureTimelineLocation := suite.generateTimelineLocation()
   975  			progressReport := suite.generateProgressReport(true)
   976  
   977  			if outcome == types.SpecStateInvalid {
   978  				outcome = types.SpecStateInterrupted
   979  				failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
   980  				if interruptStatus.ShouldIncludeProgressReport() {
   981  					failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
   982  					failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
   983  				}
   984  				suite.reporter.EmitFailure(outcome, failure)
   985  			}
   986  
   987  			progressReport = progressReport.WithoutOtherGoroutines()
   988  			sc.cancel(fmt.Errorf("%s", interruptStatus.Message()))
   989  
   990  			if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
   991  				if interruptStatus.ShouldIncludeProgressReport() {
   992  					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
   993  					suite.emitProgressReport(progressReport)
   994  				}
   995  				return outcome, failure
   996  			}
   997  			if interruptStatus.ShouldIncludeProgressReport() {
   998  				if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
   999  					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs.  {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
  1000  				} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
  1001  					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes.  {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
  1002  				}
  1003  				suite.emitProgressReport(progressReport)
  1004  			}
  1005  
  1006  			if gracePeriodChannel == nil {
  1007  				// we haven't given grace yet... so let's
  1008  				gracePeriodChannel = time.After(gracePeriod)
  1009  			} else {
  1010  				// we've already given grace.  time's up.  now.
  1011  				return outcome, failure
  1012  			}
  1013  		case <-emitProgressNow:
  1014  			report := suite.generateProgressReport(false)
  1015  			report.Message = "{{bold}}Automatically polling progress:{{/}}"
  1016  			suite.emitProgressReport(report)
  1017  			if pollProgressInterval > 0 {
  1018  				progressPoller.Reset(pollProgressInterval)
  1019  			}
  1020  		}
  1021  	}
  1022  }
  1023  
  1024  // TODO: search for usages and consider if reporter.EmitFailure() is necessary
  1025  func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
  1026  	return types.Failure{
  1027  		Message:             message,
  1028  		Location:            node.CodeLocation,
  1029  		TimelineLocation:    suite.generateTimelineLocation(),
  1030  		FailureNodeContext:  types.FailureNodeIsLeafNode,
  1031  		FailureNodeType:     node.NodeType,
  1032  		FailureNodeLocation: node.CodeLocation,
  1033  	}
  1034  }
  1035  
  1036  func max(a, b int) int {
  1037  	if a > b {
  1038  		return a
  1039  	}
  1040  	return b
  1041  }
  1042  
