
Source file src/github.com/onsi/ginkgo/v2/internal/ordering.go

Documentation: github.com/onsi/ginkgo/v2/internal

package internal

import (
	"math/rand"
	"sort"

	"github.com/onsi/ginkgo/v2/types"
)

type SortableSpecs struct {
	Specs   Specs
	Indexes []int
}

func NewSortableSpecs(specs Specs) *SortableSpecs {
	indexes := make([]int, len(specs))
	for i := range specs {
		indexes[i] = i
	}
	return &SortableSpecs{
		Specs:   specs,
		Indexes: indexes,
	}
}
func (s *SortableSpecs) Len() int      { return len(s.Indexes) }
func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
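// Less implements sort.Interface.  Specs are compared, in order of precedence, by:
// shared ordered-container membership (preserving declaration order), then the code
// location (file name, then line number) of each container/It node, then the number
// of nodes, then node text, and finally the ID of the most deeply nested node.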
func (s *SortableSpecs) Less(i, j int) bool {
	a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]

	aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)

	firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
	if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
		// strictly preserve order within an ordered container.  IDs are generated monotonically, so comparing the Its' IDs preserves declaration order.
		return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
	}

	// if either spec is in an ordered container - only use the nodes up to the outermost ordered container
	if firstOrderedAIdx > -1 {
		aNodes = aNodes[:firstOrderedAIdx+1]
	}
	if firstOrderedBIdx > -1 {
		bNodes = bNodes[:firstOrderedBIdx+1]
	}

	for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
		aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
		if aCL.FileName != bCL.FileName {
			return aCL.FileName < bCL.FileName
		}
		if aCL.LineNumber != bCL.LineNumber {
			return aCL.LineNumber < bCL.LineNumber
		}
	}
	// either everything is equal or we have different numbers of code locations
	if len(aNodes) != len(bNodes) {
		return len(aNodes) < len(bNodes)
	}
	// at this point the code locations are all equal, so we use the spec text to break ties
	for i := 0; i < len(aNodes); i++ {
		if aNodes[i].Text != bNodes[i].Text {
			return aNodes[i].Text < bNodes[i].Text
		}
	}
	// all the texts were equal too, so we use the ID of the most deeply nested node as a last resort
	return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
}

type GroupedSpecIndices []SpecIndices
type SpecIndices []int

func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
	/*
		Ginkgo has sophisticated support for randomizing specs.  Specs are guaranteed to have the same
		order for a given seed across test runs.

		By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
		experience - specs within a given container run in the order they appear in the file.

		Developers can set -randomizeAllSpecs to shuffle _all_ specs.

		In addition, spec containers can be marked as Ordered.  Specs within an Ordered container are never shuffled.

		Finally, specs and spec containers can be marked as Serial.  When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
	*/
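
	// An illustrative (hypothetical) example: given Describe("A") containing It("1") and It("2"),
	// and Describe("B", Ordered) containing It("3") and It("4"), the default shuffle may run B
	// before A, but "1" always precedes "2" (same container, default mode) and "3" always
	// precedes "4" (Ordered) - even with -randomizeAllSpecs.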

	// Seed a new random source based on the configured random seed.
	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))

	// first, we sort the entire suite to ensure a deterministic order.  the sort is performed by filename, then line number, and then spec text.  this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
	sortableSpecs := NewSortableSpecs(specs)
	sort.Sort(sortableSpecs)

	// then we break things into execution groups
	// a group represents a single unit of execution and is a collection of SpecIndices
	// usually a group is just a single spec, however ordered containers must be preserved as a single group
	executionGroupIDs := []uint{}
	executionGroups := map[uint]SpecIndices{}
	for _, idx := range sortableSpecs.Indexes {
		spec := specs[idx]
		groupNode := spec.Nodes.FirstNodeMarkedOrdered()
		if groupNode.IsZero() {
			groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
		}
		executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
		if len(executionGroups[groupNode.ID]) == 1 {
			executionGroupIDs = append(executionGroupIDs, groupNode.ID)
		}
	}
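
	// For example (hypothetical): an Ordered container holding three Its yields a single
	// execution group, keyed by the container's ID, that holds all three spec indices;
	// each standalone It forms its own single-spec group keyed by the It's ID.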

	// now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
	// we only shuffle the outermost containers.  so we need to form shufflable groupings of GroupIDs
	shufflableGroupingIDs := []uint{}
	shufflableGroupingIDToGroupIDs := map[uint][]uint{}

	// for each execution group we're going to have to pick a node to represent how the
	// execution group is grouped for shuffling:
	nodeTypesToShuffle := types.NodeTypesForContainerAndIt
	if suiteConfig.RandomizeAllSpecs {
		nodeTypesToShuffle = types.NodeTypeIt
	}

	// so, for each execution group:
	for _, groupID := range executionGroupIDs {
		// pick out a representative spec
		representativeSpec := specs[executionGroups[groupID][0]]

		// and grab the node on the spec that will represent which shufflable group this execution group belongs to
		shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)

		// add the execution group to its shufflable group
		shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)

		// and if it's the first one in
		if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
			// record the shufflable group ID
			shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
		}
	}
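
	// For example (hypothetical): with default randomization, every execution group under the
	// same top-level Describe shares one shufflable grouping, so that Describe's specs move
	// together; with -randomizeAllSpecs each execution group gets its own grouping (keyed by
	// its first It) and shuffles independently.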

	// now we permute the sorted shufflable grouping IDs and build the ordered Groups
	orderedGroups := GroupedSpecIndices{}
	permutation := r.Perm(len(shufflableGroupingIDs))
	for _, j := range permutation {
		// let's get the execution group IDs for this shufflable group:
		executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
		// and we'll add their associated SpecIndices to the orderedGroups slice:
		for _, executionGroupID := range executionGroupIDsForJ {
			orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
		}
	}
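
	// Because r was seeded with suiteConfig.RandomSeed, this permutation - and therefore the
	// final spec order - is identical across parallel processes and across runs that share a seed.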

	// If we're running in series, we're done.
	if suiteConfig.ParallelTotal == 1 {
		return orderedGroups, GroupedSpecIndices{}
	}

	// We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
	// The parallelizable groups will run across all Ginkgo processes...
	// ...the serial groups will only run on Process #1 after all other processes have exited.
	parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
	for _, specIndices := range orderedGroups {
		if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
			serialGroups = append(serialGroups, specIndices)
		} else {
			parallelizableGroups = append(parallelizableGroups, specIndices)
		}
	}
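
	// For example (hypothetical): with 4 parallel processes, the parallelizable groups are
	// distributed across all 4 processes, while the serial groups queue up to run on
	// Process #1 only after the other 3 processes have exited.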

	return parallelizableGroups, serialGroups
}
