
Source file src/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go

Documentation: github.com/Microsoft/hcsshim/internal/hcsoci

//go:build windows
// +build windows

package hcsoci

import (
	"context"
	"errors"
	"fmt"
	"math"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/Microsoft/go-winio/pkg/fs"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/sirupsen/logrus"

	"github.com/Microsoft/hcsshim/internal/guestpath"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/layers"
	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/oci"
	"github.com/Microsoft/hcsshim/internal/processorinfo"
	"github.com/Microsoft/hcsshim/internal/uvm"
	"github.com/Microsoft/hcsshim/internal/uvmfolder"
	"github.com/Microsoft/hcsshim/internal/wclayer"
	"github.com/Microsoft/hcsshim/osversion"
	"github.com/Microsoft/hcsshim/pkg/annotations"
)

const createContainerSubdirectoryForProcessDumpSuffix = "{container_id}"

// A simple wrapper struct around the container mount configs that should be added to the
// container.
type mountsConfig struct {
	mdsv1 []schema1.MappedDir
	mpsv1 []schema1.MappedPipe
	mdsv2 []hcsschema.MappedDirectory
	mpsv2 []hcsschema.MappedPipe
}

func createMountsConfig(ctx context.Context, coi *createOptionsInternal) (*mountsConfig, error) {
	// Add the mounts as mapped directories or mapped pipes
	// TODO: Mapped pipes to add in v2 schema.
	var config mountsConfig
	for _, mount := range coi.Spec.Mounts {
		if uvm.IsPipe(mount.Source) {
			src, dst := uvm.GetContainerPipeMapping(coi.HostingSystem, mount)
			config.mpsv1 = append(config.mpsv1, schema1.MappedPipe{HostPath: src, ContainerPipeName: dst})
			config.mpsv2 = append(config.mpsv2, hcsschema.MappedPipe{HostPath: src, ContainerPipeName: dst})
		} else {
			readOnly := false
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					readOnly = true
				}
			}
			mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly}
			mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly}
			if coi.HostingSystem == nil {
				// HCS has a bug where it does not correctly resolve file (not dir) paths
				// if the path includes a symlink. Therefore, we resolve the path here before
				// passing it in. The issue does not occur with VSMB, so we don't need to
				// worry about the hypervisor-isolated case.
				src, err := fs.ResolvePath(mount.Source)
				if err != nil {
					return nil, fmt.Errorf("failed to resolve path for mount source %q: %w", mount.Source, err)
				}
				mdv2.HostPath = src
			} else if mount.Type == MountTypeVirtualDisk || mount.Type == MountTypePhysicalDisk || mount.Type == MountTypeExtensibleVirtualDisk {
				// For v2 schema containers, any disk mounts will be part of coi.additionalMounts.
				// For v1 schema containers, we don't even get here, since there is no HostingSystem.
				continue
			} else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) {
				// Convert to the path in the guest that was asked for.
				mdv2.HostPath = convertToWCOWSandboxMountPath(mount.Source)
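				// (Illustrative: a source like "sandbox://data" is rewritten to the
				// corresponding guest path under the WCOW sandbox mount root; see
				// convertToWCOWSandboxMountPath for the exact mapping.)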
			} else {
				// vsmb mount
				uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(ctx, mount.Source, readOnly)
				if err != nil {
					return nil, err
				}
				mdv2.HostPath = uvmPath
			}
			config.mdsv1 = append(config.mdsv1, mdv1)
			config.mdsv2 = append(config.mdsv2, mdv2)
		}
	}
	config.mdsv2 = append(config.mdsv2, coi.windowsAdditionalMounts...)
	return &config, nil
}
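
// A minimal sketch of what createMountsConfig produces for a read-only bind
// mount (hypothetical values, process-isolated case where HostingSystem is nil):
//
//	mount := specs.Mount{
//		Source:      `C:\host\config`,
//		Destination: `C:\config`,
//		Options:     []string{"ro"},
//	}
//	// -> config.mdsv1 gains schema1.MappedDir{HostPath: `C:\host\config`, ContainerPath: `C:\config`, ReadOnly: true}
//	// -> config.mdsv2 gains hcsschema.MappedDirectory{HostPath: <resolved source path>, ContainerPath: `C:\config`, ReadOnly: true}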

// ConvertCPULimits handles the logic of converting and validating the container's CPU
// limits specified in the OCI spec to what HCS expects.
//
// `cid` is the container's ID.
//
// `spec` is the OCI spec for the container.
//
// `maxCPUCount` is the maximum CPU count allowed for the container. This value should
// be the number of processors on the host, or in the case of a hypervisor-isolated
// container the number of processors assigned to the guest/Utility VM.
//
// Returns the CPU count, CPU limit, and CPU weight, in that order. Returns an error if
// more than one of them was specified in the OCI spec, as they are mutually exclusive.
func ConvertCPULimits(ctx context.Context, cid string, spec *specs.Spec, maxCPUCount int32) (int32, int32, int32, error) {
	cpuNumSet := 0
	cpuCount := oci.ParseAnnotationsCPUCount(ctx, spec, annotations.ContainerProcessorCount, 0)
	if cpuCount > 0 {
		cpuNumSet++
	}

	cpuLimit := oci.ParseAnnotationsCPULimit(ctx, spec, annotations.ContainerProcessorLimit, 0)
	if cpuLimit > 0 {
		cpuNumSet++
	}

	cpuWeight := oci.ParseAnnotationsCPUWeight(ctx, spec, annotations.ContainerProcessorWeight, 0)
	if cpuWeight > 0 {
		cpuNumSet++
	}

	if cpuNumSet > 1 {
		return 0, 0, 0, fmt.Errorf("invalid spec - Windows Container CPU Count: '%d', Limit: '%d', and Weight: '%d' are mutually exclusive", cpuCount, cpuLimit, cpuWeight)
	} else if cpuNumSet == 1 {
		cpuCount = NormalizeProcessorCount(ctx, cid, cpuCount, maxCPUCount)
	}
	return cpuCount, cpuLimit, cpuWeight, nil
}
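
// A minimal usage sketch (hypothetical spec): with only the processor-count
// annotation set, ConvertCPULimits returns the normalized count and zero for
// the other two values; setting two or more of the annotations is an error.
//
//	spec.Annotations = map[string]string{annotations.ContainerProcessorCount: "2"}
//	count, limit, weight, err := ConvertCPULimits(ctx, "example-cid", spec, 8)
//	// count == 2 (NormalizeProcessorCount caps it at maxCPUCount), limit == 0, weight == 0, err == nil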

// createWindowsContainerDocument creates documents for passing to HCS or GCS to create
// a container, both hosted and process isolated. It creates both v1 and v2
// container objects, WCOW only. The container's storage should have been mounted already.
func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInternal) (*schema1.ContainerConfig, *hcsschema.Container, error) {
	log.G(ctx).Debug("hcsshim: CreateHCSContainerDocument")
	// TODO: Make this safe if exported so no null pointer dereferences.

	if coi.Spec == nil {
		return nil, nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing")
	}

	if coi.Spec.Windows == nil {
		return nil, nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing")
	}

	v1 := &schema1.ContainerConfig{
		SystemType:              "Container",
		Name:                    coi.actualID,
		Owner:                   coi.actualOwner,
		HvPartition:             false,
		IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot,
	}

	// IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM
	// ID is a property on the create call in V2 rather than part of the schema.
	v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}}

	// TODO: Still want to revisit this.
	if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 {
		return nil, nil, fmt.Errorf("invalid spec - not enough layer folders supplied")
	}

	if coi.Spec.Hostname != "" {
		v1.HostName = coi.Spec.Hostname
		v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname}
	}

	var (
		uvmCPUCount  int32
		hostCPUCount = processorinfo.ProcessorCount()
		maxCPUCount  = hostCPUCount
	)

	if coi.HostingSystem != nil {
		uvmCPUCount = coi.HostingSystem.ProcessorCount()
		maxCPUCount = uvmCPUCount
	}

	cpuCount, cpuLimit, cpuWeight, err := ConvertCPULimits(ctx, coi.ID, coi.Spec, maxCPUCount)
	if err != nil {
		return nil, nil, err
	}

	if coi.HostingSystem != nil && coi.ScaleCPULimitsToSandbox && cpuLimit > 0 {
		// When ScaleCPULimitsToSandbox is set and we are running in a UVM, we assume
		// the CPU limit has been calculated based on the number of processors on the
		// host, and instead re-calculate it based on the number of processors in the UVM.
		//
		// This is needed to work correctly with assumptions kubelet makes when computing
		// the CPU limit value:
		// - kubelet thinks about CPU limits in terms of millicores, which are 1000ths of
		//   cores. So if 2000 millicores are assigned, the container can use 2 processors.
		// - In Windows, the job object CPU limit is global across all processors on the
		//   system, and is represented as a fraction out of 10000. In this model, a limit
		//   of 10000 means the container can use all processors fully, regardless of how
		//   many processors exist on the system.
		// - To convert the millicores value into the job object limit, kubelet divides
		//   the millicores by the number of CPU cores on the host. This causes problems
		//   when running inside a UVM, as the UVM may have a different number of processors
		//   than the host system.
		//
		// To work around this, we undo the division by the number of host processors, and
		// re-do the division based on the number of processors inside the UVM. This will
		// give the correct value based on the actual number of millicores that the kubelet
		// wants the container to have.
		//
		// Kubelet formula to compute CPU limit:
		// cpuMaximum := 10000 * cpuLimit.MilliValue() / int64(runtime.NumCPU()) / 1000
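		// Worked example (illustrative numbers): 2000 millicores on a
		// 16-processor host gives cpuMaximum = 10000*2000/16/1000 = 1250.
		// If the UVM has 4 processors, the rescaled limit is 1250*16/4 = 5000,
		// i.e. half of the UVM's 4 processors, the same 2 cores' worth of CPU
		// the kubelet intended.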
		newCPULimit := cpuLimit * hostCPUCount / uvmCPUCount
		// We only apply bounds here because we are calculating the CPU limit ourselves,
		// and this matches the kubelet behavior where they also bound the CPU limit by [1, 10000].
		// In the case where we use the value directly from the user, we don't alter it to fit
		// within the bounds, but just let the platform throw an error if it is invalid.
		if newCPULimit < 1 {
			newCPULimit = 1
		} else if newCPULimit > 10000 {
			newCPULimit = 10000
		}
		log.G(ctx).WithFields(logrus.Fields{
			"hostCPUCount": hostCPUCount,
			"uvmCPUCount":  uvmCPUCount,
			"oldCPULimit":  cpuLimit,
			"newCPULimit":  newCPULimit,
		}).Info("rescaling CPU limit for UVM sandbox")
		cpuLimit = newCPULimit
	}

	v1.ProcessorCount = uint32(cpuCount)
	v1.ProcessorMaximum = int64(cpuLimit)
	v1.ProcessorWeight = uint64(cpuWeight)

	v2Container.Processor = &hcsschema.Processor{
		Count:   cpuCount,
		Maximum: cpuLimit,
		Weight:  cpuWeight,
	}

	// Memory Resources
	memoryMaxInMB := oci.ParseAnnotationsMemory(ctx, coi.Spec, annotations.ContainerMemorySizeInMB, 0)
	if memoryMaxInMB > 0 {
		v1.MemoryMaximumInMB = int64(memoryMaxInMB)
		v2Container.Memory = &hcsschema.Memory{
			SizeInMB: memoryMaxInMB,
		}
	}

	// Storage Resources
	storageBandwidthMax := oci.ParseAnnotationsStorageBps(ctx, coi.Spec, annotations.ContainerStorageQoSBandwidthMaximum, 0)
	storageIopsMax := oci.ParseAnnotationsStorageIops(ctx, coi.Spec, annotations.ContainerStorageQoSIopsMaximum, 0)
	if storageBandwidthMax > 0 || storageIopsMax > 0 {
		v1.StorageBandwidthMaximum = uint64(storageBandwidthMax)
		v1.StorageIOPSMaximum = uint64(storageIopsMax)
		v2Container.Storage.QoS = &hcsschema.StorageQoS{
			BandwidthMaximum: storageBandwidthMax,
			IopsMaximum:      storageIopsMax,
		}
	}

	// TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically
	if coi.Spec.Windows.Network != nil {
		v2Container.Networking = &hcsschema.Networking{}

		v1.EndpointList = coi.Spec.Windows.Network.EndpointList

		v2Container.Networking.Namespace = coi.actualNetworkNamespace

		v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery
		v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery

		if coi.Spec.Windows.Network.DNSSearchList != nil {
			v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",")
			v2Container.Networking.DnsSearchList = v1.DNSSearchList
		}

		v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName
		v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName
	}

	if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
		v1.Credentials = cs
		// If this is a HCS v2 schema container, we created the CCG instance
		// with the other container resources. Pass the CCG state information
		// as part of the container document.
		if coi.ccgState != nil {
			v2Container.ContainerCredentialGuard = coi.ccgState
		}
	}

	if coi.Spec.Root == nil {
		return nil, nil, fmt.Errorf("spec is invalid - root isn't populated")
	}

	if coi.Spec.Root.Readonly {
		return nil, nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`)
	}

	// Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1
	v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]

	if coi.isV2Argon() || coi.isV1Argon() {
		// Argon v1 or v2.
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
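		// e.g. `\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}\` (illustrative GUID)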
		if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil {
			return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path)
		}
		if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' {
			coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
		}
		v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1.
		v2Container.Storage.Path = coi.Spec.Root.Path
	} else if coi.isV1Xenon() {
		// V1 Xenon
		v1.HvPartition = true
		if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference
			return nil, nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`)
		}
		if coi.Spec.Windows.HyperV.UtilityVMPath != "" {
			// Client-supplied utility VM path
			v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath}
		} else {
			// Client was lazy. Let's locate it from the layer folders instead.
			uvmImagePath, err := uvmfolder.LocateUVMFolder(ctx, coi.Spec.Windows.LayerFolders)
			if err != nil {
				return nil, nil, err
			}
			v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)}
		}
	} else if coi.isV2Xenon() {
		// Hosting system was supplied, so is v2 Xenon.
		v2Container.Storage.Path = coi.Spec.Root.Path
		if coi.HostingSystem.OS() == "windows" {
			layers, err := layers.GetHCSLayers(ctx, coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1])
			if err != nil {
				return nil, nil, err
			}
			v2Container.Storage.Layers = layers
		}
	}

	if coi.isV2Argon() || coi.isV1Argon() { // Argon v1 or v2
		for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] {
			layerID, err := wclayer.LayerID(ctx, layerPath)
			if err != nil {
				return nil, nil, err
			}
			v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath})
			v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath})
		}
	}

	mounts, err := createMountsConfig(ctx, coi)
	if err != nil {
		return nil, nil, err
	}
	v1.MappedDirectories = mounts.mdsv1
	v2Container.MappedDirectories = mounts.mdsv2
	if len(mounts.mpsv1) > 0 && osversion.Build() < osversion.RS3 {
		return nil, nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows")
	}
	v1.MappedPipes = mounts.mpsv1
	v2Container.MappedPipes = mounts.mpsv2

	// add assigned devices to the container definition
	if err := parseAssignedDevices(ctx, coi, v2Container); err != nil {
		return nil, nil, err
	}

	// add any device extensions
	extensions, err := getDeviceExtensions(coi.Spec.Annotations)
	if err != nil {
		return nil, nil, err
	}
	v2Container.AdditionalDeviceNamespace = extensions

	// Process dump setup (if requested)
	dumpPath := ""
	if coi.HostingSystem != nil {
		dumpPath = coi.HostingSystem.ProcessDumpLocation()
	}

	if specDumpPath, ok := coi.Spec.Annotations[annotations.ContainerProcessDumpLocation]; ok {
		// A dump path specified at pod creation time for a hypervisor-isolated pod was
		// picked up above from the hosting system. A value on this container's spec
		// overrides it: unlike Linux, Windows containers can set the dump path on a
		// per-container basis.
		dumpPath = specDumpPath
	}

	// Servercore images block on signaling and wait until the target process
	// is terminated to return to its caller. By default, servercore waits for
	// 5 seconds (default value of 'WaitToKillServiceTimeout') before sending
	// a SIGKILL to terminate the process. This causes issues when graceful
	// termination of containers is requested (Bug36689012).
	// The regkey 'WaitToKillServiceTimeout' value is overridden here to help
	// honor graceful termination of containers by waiting for the requested
	// amount of time before stopping the container.
	// More details on the implementation of this fix can be found in the Kill()
	// function of exec_hcs.go.

	// The 'WaitToKillServiceTimeout' value is arbitrarily set to a value long
	// enough that no one will want to wait longer.
	registryAdd := []hcsschema.RegistryValue{
		{
			Key: &hcsschema.RegistryKey{
				Hive: "System",
				Name: "ControlSet001\\Control",
			},
			Name:        "WaitToKillServiceTimeout",
			StringValue: strconv.Itoa(math.MaxInt32),
			Type_:       "String",
		},
	}
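	// Note: math.MaxInt32 is 2147483647; interpreted as milliseconds by
	// 'WaitToKillServiceTimeout', that is roughly 24.8 days, so the service
	// kill timeout effectively never fires before the requested stop timeout.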

	if dumpPath != "" {
		// If dumpPath ends with the createContainerSubdirectoryForProcessDumpSuffix
		// placeholder, create a subdirectory for this container at the specified
		// dumpPath location. When a fileshare from the host is mounted at dumpPath,
		// this helps identify dumps coming from different containers in the pod.
		// The suffix is checked in both lower case and upper case.
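		// Illustrative: a dumpPath of `C:\dumps\{container_id}` becomes
		// `C:\dumps\<actual container ID>`.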
		if strings.HasSuffix(dumpPath, createContainerSubdirectoryForProcessDumpSuffix) {
			// replace {container_id} with the actual container id
			dumpPath = strings.TrimSuffix(dumpPath, createContainerSubdirectoryForProcessDumpSuffix) + coi.ID
		} else if strings.HasSuffix(dumpPath, strings.ToUpper(createContainerSubdirectoryForProcessDumpSuffix)) {
			// replace {CONTAINER_ID} with the actual container id
			dumpPath = strings.TrimSuffix(dumpPath, strings.ToUpper(createContainerSubdirectoryForProcessDumpSuffix)) + coi.ID
		}
		dumpType, err := parseDumpType(coi.Spec.Annotations)
		if err != nil {
			return nil, nil, err
		}
		dumpCount, err := parseDumpCount(coi.Spec.Annotations)
		if err != nil {
			return nil, nil, err
		}

		// Set up WER registry keys for local process dump creation if specified.
		// https://docs.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps
		registryAdd = append(registryAdd, []hcsschema.RegistryValue{
			{
				Key: &hcsschema.RegistryKey{
					Hive: "Software",
					Name: "Microsoft\\Windows\\Windows Error Reporting\\LocalDumps",
				},
				Name:        "DumpFolder",
				StringValue: dumpPath,
				Type_:       "String",
			},
			{
				Key: &hcsschema.RegistryKey{
					Hive: "Software",
					Name: "Microsoft\\Windows\\Windows Error Reporting\\LocalDumps",
				},
				Name:       "DumpType",
				DWordValue: dumpType,
				Type_:      "DWord",
			},
			{
				Key: &hcsschema.RegistryKey{
					Hive: "Software",
					Name: "Microsoft\\Windows\\Windows Error Reporting\\LocalDumps",
				},
				Name:       "DumpCount",
				DWordValue: dumpCount,
				Type_:      "DWord",
			},
		}...)
	}

	v2Container.RegistryChanges = &hcsschema.RegistryChanges{
		AddValues: registryAdd,
	}
	return v1, v2Container, nil
}

// parseAssignedDevices parses assigned devices for the container definition.
// This is currently supported for v2 Argon and Xenon only.
func parseAssignedDevices(ctx context.Context, coi *createOptionsInternal, v2 *hcsschema.Container) error {
	if !coi.isV2Argon() && !coi.isV2Xenon() {
		return nil
	}

	v2AssignedDevices := []hcsschema.Device{}
	for _, d := range coi.Spec.Windows.Devices {
		v2Dev := hcsschema.Device{}
		switch d.IDType {
		case uvm.VPCILocationPathIDType:
			v2Dev.LocationPath = d.ID
			v2Dev.Type = hcsschema.DeviceInstanceID
		case uvm.VPCIClassGUIDTypeLegacy, uvm.VPCIClassGUIDType:
			v2Dev.InterfaceClassGuid = d.ID
		default:
			return fmt.Errorf("specified device %s has unsupported type %s", d.ID, d.IDType)
		}
		log.G(ctx).WithField("hcsv2 device", v2Dev).Debug("adding assigned device to container doc")
		v2AssignedDevices = append(v2AssignedDevices, v2Dev)
	}
	v2.AssignedDevices = v2AssignedDevices
	return nil
}
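
// A minimal sketch of the mapping above (hypothetical device values):
//
//	specs.WindowsDevice{ID: "PCIROOT(0)#PCI(0300)", IDType: uvm.VPCILocationPathIDType}
//	// -> hcsschema.Device{Type: hcsschema.DeviceInstanceID, LocationPath: "PCIROOT(0)#PCI(0300)"}
//	specs.WindowsDevice{ID: "<interface class GUID>", IDType: uvm.VPCIClassGUIDType}
//	// -> hcsschema.Device{InterfaceClassGuid: "<interface class GUID>"}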

func parseDumpCount(annots map[string]string) (int32, error) {
	dmpCountStr := annots[annotations.WCOWProcessDumpCount]
	if dmpCountStr == "" {
		// If no count is specified, default to 10.
		return 10, nil
	}

	dumpCount, err := strconv.Atoi(dmpCountStr)
	if err != nil {
		return -1, err
	}
	if dumpCount > 0 {
		return int32(dumpCount), nil
	}
	return -1, fmt.Errorf("invalid dump count specified: %v", dmpCountStr)
}

// parseDumpType parses the string representation of the local user-mode process dump
// type into the corresponding value the registry expects to be set.
//
// See DumpType at https://docs.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps for the mappings.
func parseDumpType(annots map[string]string) (int32, error) {
	dmpTypeStr := annots[annotations.WCOWProcessDumpType]
	switch dmpTypeStr {
	case "":
		// If no type specified, default to full dumps.
		return 2, nil
	case "mini":
		return 1, nil
	case "full":
		return 2, nil
	default:
		return -1, errors.New(`unknown dump type specified, valid values are "mini" or "full"`)
	}
}
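
// A minimal usage sketch: Annotations[annotations.WCOWProcessDumpType] = "mini"
// yields a WER DumpType of 1, "full" (or an unset annotation) yields 2, and
// anything else is rejected.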
