...

Text file src/github.com/Microsoft/hcsshim/pkg/securitypolicy/framework.rego

Documentation: github.com/Microsoft/hcsshim/pkg/securitypolicy

     1package framework
     2
     3import future.keywords.every
     4import future.keywords.in
     5
# Framework schema version; "@@FRAMEWORK_VERSION@@" appears to be a
# placeholder substituted with a real semver string at build/packaging time
# (TODO confirm against the build tooling).
version := "@@FRAMEWORK_VERSION@@"
     7
# True when a device is already mounted at `target`, i.e. the target appears
# as a key in the "devices" metadata table (written by mount_device, removed
# by unmount_device). The stored value is a device-hash string, so the lookup
# is satisfied whenever the key exists.
device_mounted(target) {
    data.metadata.devices[target]
}
    11
    12default deviceHash_ok := false
    13
    14# test if a device hash exists as a layer in a policy container
    15deviceHash_ok {
    16    layer := data.policy.containers[_].layers[_]
    17    input.deviceHash == layer
    18}
    19
    20# test if a device hash exists as a layer in a fragment container
    21deviceHash_ok {
    22    feed := data.metadata.issuers[_].feeds[_]
    23    some fragment in feed
    24    layer := fragment.containers[_].layers[_]
    25    input.deviceHash == layer
    26}
    27
    28default mount_device := {"allowed": false}
    29
    30mount_device := {"metadata": [addDevice], "allowed": true} {
    31    not device_mounted(input.target)
    32    deviceHash_ok
    33    addDevice := {
    34        "name": "devices",
    35        "action": "add",
    36        "key": input.target,
    37        "value": input.deviceHash,
    38    }
    39}
    40
    41default unmount_device := {"allowed": false}
    42
    43unmount_device := {"metadata": [removeDevice], "allowed": true} {
    44    device_mounted(input.unmountTarget)
    45    removeDevice := {
    46        "name": "devices",
    47        "action": "remove",
    48        "key": input.unmountTarget,
    49    }
    50}
    51
    52layerPaths_ok(layers) {
    53    length := count(layers)
    54    count(input.layerPaths) == length
    55    every i, path in input.layerPaths {
    56        layers[(length - i) - 1] == data.metadata.devices[path]
    57    }
    58}
    59
default overlay_exists := false

# True once mount_overlay has recorded candidate matches for this container
# ID. The stored value is a non-empty array of containers (mount_overlay
# requires count > 0), so the lookup is satisfied whenever the key exists.
overlay_exists {
    data.metadata.matches[input.containerID]
}
    65
# True when `target` was recorded as an overlay mount target by
# mount_overlay (which stores the literal value `true` for the key).
overlay_mounted(target) {
    data.metadata.overlayTargets[target]
}
    69
    70default candidate_containers := []
    71
    72candidate_containers := containers {
    73    semver.compare(policy_framework_version, version) == 0
    74
    75    policy_containers := [c | c := data.policy.containers[_]]
    76    fragment_containers := [c |
    77        feed := data.metadata.issuers[_].feeds[_]
    78        fragment := feed[_]
    79        c := fragment.containers[_]
    80    ]
    81
    82    containers := array.concat(policy_containers, fragment_containers)
    83}
    84
    85candidate_containers := containers {
    86    semver.compare(policy_framework_version, version) < 0
    87
    88    policy_containers := apply_defaults("container", data.policy.containers, policy_framework_version)
    89    fragment_containers := [c |
    90        feed := data.metadata.issuers[_].feeds[_]
    91        fragment := feed[_]
    92        c := fragment.containers[_]
    93    ]
    94
    95    containers := array.concat(policy_containers, fragment_containers)
    96}
    97
default mount_overlay := {"allowed": false}

# Allow mounting a container overlay when no overlay exists yet for this
# container ID and at least one candidate container's layer list matches the
# supplied layer paths. On success:
#   - "matches" records the surviving candidates for this container ID
#     (later narrowed by create_container / exec_in_container), and
#   - "overlayTargets" records input.target so unmount_overlay can verify it.
mount_overlay := {"metadata": [addMatches, addOverlayTarget], "allowed": true} {
    not overlay_exists

    # keep only candidates whose layer hashes match input.layerPaths
    containers := [container |
        container := candidate_containers[_]
        layerPaths_ok(container.layers)
    ]

    count(containers) > 0
    addMatches := {
        "name": "matches",
        "action": "add",
        "key": input.containerID,
        "value": containers,
    }

    addOverlayTarget := {
        "name": "overlayTargets",
        "action": "add",
        "key": input.target,
        "value": true,
    }
}
   123
   124default unmount_overlay := {"allowed": false}
   125
   126unmount_overlay := {"metadata": [removeOverlayTarget], "allowed": true} {
   127    overlay_mounted(input.unmountTarget)
   128    removeOverlayTarget := {
   129        "name": "overlayTargets",
   130        "action": "remove",
   131        "key": input.unmountTarget,
   132    }
   133}
   134
   135command_ok(command) {
   136    count(input.argList) == count(command)
   137    every i, arg in input.argList {
   138        command[i] == arg
   139    }
   140}
   141
# An environment variable satisfies a "string" rule by exact equality...
env_ok(pattern, "string", value) {
    pattern == value
}

# ...and a "re2" rule when it matches the rule's regular expression.
env_ok(pattern, "re2", value) {
    regex.match(pattern, value)
}
   149
# A rule that is not required never blocks on its own (whether the variable
# itself is allowed is checked separately in envList_ok)...
rule_ok(rule, env) {
    not rule.required
}

# ...a required rule must be satisfied by the given variable.
rule_ok(rule, env) {
    rule.required
    env_ok(rule.pattern, rule.strategy, env)
}
   158
# envList is acceptable when (a) every rule is satisfied — a non-required
# rule trivially, a required rule by some variable in the list — and
# (b) every variable in the list is allowed by at least one rule.
envList_ok(env_rules, envList) {
    every rule in env_rules {
        some env in envList
        rule_ok(rule, env)
    }

    every env in envList {
        some rule in env_rules
        env_ok(rule.pattern, rule.strategy, env)
    }
}
   170
   171valid_envs_subset(env_rules) := envs {
   172    envs := {env |
   173        some env in input.envList
   174        some rule in env_rules
   175        env_ok(rule.pattern, rule.strategy, env)
   176    }
   177}
   178
# When environment-variable dropping is enabled, compute the environment
# list to enforce from the most-specific matches. The rule is only defined
# when the largest valid subsets all agree exactly.
valid_envs_for_all(items) := envs {
    allow_environment_variable_dropping

    # for each item, find a subset of the environment rules
    # that are valid
    valid := [envs |
        some item in items
        envs := valid_envs_subset(item.env_rules)
    ]

    # we want to select the most specific matches, which in this
    # case consists of those matches which require dropping the
    # fewest environment variables (i.e. the longest lists)
    counts := [num_envs |
        envs := valid[_]
        num_envs := count(envs)
    ]
    max_count := max(counts)

    largest_env_sets := {envs |
        some i
        counts[i] == max_count
        envs := valid[i]
    }

    # if there is more than one set with the same size, we
    # can only proceed if they are all the same, so we verify
    # that the intersection is equal to the union. For a single
    # set this is trivially true.
    envs_i := intersection(largest_env_sets)
    envs_u := union(largest_env_sets)
    envs_i == envs_u
    envs := envs_i
}

# When dropping is disabled the requested environment list is returned
# unchanged; per-item validation then happens via envList_ok at call sites.
valid_envs_for_all(items) := envs {
    not allow_environment_variable_dropping

    # no dropping allowed, so we just return the input
    envs := input.envList
}
   220
# The requested working directory must equal the constraint exactly.
workingDirectory_ok(working_dir) {
    input.workingDir == working_dir
}
   224
# A non-privileged request is always acceptable, regardless of whether the
# container may be elevated...
privileged_ok(elevation_allowed) {
    not input.privileged
}

# ...a privileged request is only acceptable when elevation_allowed is true.
privileged_ok(elevation_allowed) {
    input.privileged
    input.privileged == elevation_allowed
}
   233
# If the policy requires noNewPrivileges, the request must set it as well...
noNewPrivileges_ok(no_new_privileges) {
    no_new_privileges
    input.noNewPrivileges
}

# ...if the policy does not require it (false), any request value passes.
noNewPrivileges_ok(no_new_privileges) {
    no_new_privileges == false
}
   242
# Identity matching by strategy:
# "any": always matches.
idName_ok(pattern, "any", value) {
    true
}

# "id": pattern must equal value.id.
idName_ok(pattern, "id", value) {
    pattern == value.id
}

# "name": pattern must equal value.name.
idName_ok(pattern, "name", value) {
    pattern == value.name
}

# "re2": pattern is a regular expression matched against value.name.
idName_ok(pattern, "re2", value) {
    regex.match(pattern, value.name)
}
   258
# The requested identity must satisfy the container's user constraints:
# equal umask, a user identity accepted by the user_idname rule, and every
# supplementary group accepted by at least one group_idnames rule.
user_ok(user) {
    user.umask == input.umask
    idName_ok(user.user_idname.pattern, user.user_idname.strategy, input.user)
    every group in input.groups {
        some group_idname in user.group_idnames
        idName_ok(group_idname.pattern, group_idname.strategy, group)
    }
}
   267
# The requested seccomp profile must match the policy's by SHA-256 digest.
seccomp_ok(seccomp_profile_sha256) {
    input.seccompProfileSHA256 == seccomp_profile_sha256
}
   271
default container_started := false

# True once create_container has recorded this container ID in the "started"
# metadata table.
container_started {
    data.metadata.started[input.containerID]
}
   277
default container_privileged := false

# True when the container was started with input.privileged == true
# (recorded by create_container in the "started" table).
container_privileged {
    data.metadata.started[input.containerID].privileged
}
   283
   284capsList_ok(allowed_caps_list, requested_caps_list) {
   285    count(allowed_caps_list) == count(requested_caps_list)
   286
   287    every cap in requested_caps_list {
   288        some allowed in allowed_caps_list
   289        cap == allowed
   290    }
   291
   292    every allowed in allowed_caps_list {
   293        some cap in requested_caps_list
   294        allowed == cap
   295    }
   296}
   297
   298filter_capsList_by_allowed(allowed_caps_list, requested_caps_list) := caps {
   299    # find a subset of the capabilities that are valid
   300    caps := {cap |
   301        some cap in requested_caps_list
   302        some allowed in allowed_caps_list
   303        cap == allowed
   304    }
   305}
   306
# Filter each of the five requested capability lists (input.capabilities.*)
# down to what allowed_caps permits, returning an object of five sets.
filter_capsList_for_single_container(allowed_caps) := caps {
    bounding := filter_capsList_by_allowed(allowed_caps.bounding, input.capabilities.bounding)
    effective := filter_capsList_by_allowed(allowed_caps.effective, input.capabilities.effective)
    inheritable := filter_capsList_by_allowed(allowed_caps.inheritable, input.capabilities.inheritable)
    permitted := filter_capsList_by_allowed(allowed_caps.permitted, input.capabilities.permitted)
    ambient := filter_capsList_by_allowed(allowed_caps.ambient, input.capabilities.ambient)

    caps := {
        "bounding": bounding,
        "effective": effective,
        "inheritable": inheritable,
        "permitted": permitted,
        "ambient": ambient
    }
}
   322
# For each container, filter the requested capabilities down to what that
# container allows, then keep only the largest resulting set objects — the
# most-specific matches, i.e. those requiring the fewest drops.
largest_caps_sets_for_all(containers, privileged) := largest_caps_sets {
    filtered := [caps |
        container := containers[_]
        capabilities := get_capabilities(container, privileged)
        caps := filter_capsList_for_single_container(capabilities)
    ]

    # we want to select the most specific matches, which in this
    # case consists of those matches which require dropping the
    # fewest capabilities (i.e. the longest lists)
    counts := [num_caps |
        caps := filtered[_]
        num_caps := count(caps.bounding) + count(caps.effective) +
            count(caps.inheritable) + count(caps.permitted) +
            count(caps.ambient)
    ]
    max_count := max(counts)

    largest_caps_sets := [caps |
        some i
        counts[i] == max_count
        caps := filtered[i]
    ]
}
   347
# Defined only when every caps-set object in `sets` is identical, in which
# case that common object is returned. Equality is proven per field by
# checking that the intersection equals the union across all sets.
all_caps_sets_are_equal(sets) := caps {
    # if there is more than one set with the same size, we
    # can only proceed if they are all the same, so we verify
    # that the intersection is equal to the union. For a single
    # set this is trivially true.
    bounding_i := intersection({caps.bounding | caps := sets[_]})
    effective_i := intersection({caps.effective | caps := sets[_]})
    inheritable_i := intersection({caps.inheritable | caps := sets[_]})
    permitted_i := intersection({caps.permitted | caps := sets[_]})
    ambient_i := intersection({caps.ambient | caps := sets[_]})

    bounding_u := union({caps.bounding | caps := sets[_]})
    effective_u := union({caps.effective | caps := sets[_]})
    inheritable_u := union({caps.inheritable | caps := sets[_]})
    permitted_u := union({caps.permitted | caps := sets[_]})
    ambient_u := union({caps.ambient | caps := sets[_]})

    bounding_i == bounding_u
    effective_i == effective_u
    inheritable_i == inheritable_u
    permitted_i == permitted_u
    ambient_i == ambient_u

    caps := {
        "bounding": bounding_i,
        "effective": effective_i,
        "inheritable": inheritable_i,
        "permitted": permitted_i,
        "ambient": ambient_i,
    }
}
   379
# When capability dropping is enabled, compute the capability sets to
# enforce from the most-specific matches; only defined when all the largest
# matching sets agree exactly.
valid_caps_for_all(containers, privileged) := caps {
    allow_capability_dropping

    # find largest matching capabilities sets aka "the most specific"
    largest_caps_sets := largest_caps_sets_for_all(containers, privileged)

    # if there is more than one set with the same size, we
    # can only proceed if they are all the same
    caps := all_caps_sets_are_equal(largest_caps_sets)
}

# When dropping is disabled the requested capabilities are used unchanged.
valid_caps_for_all(containers, privileged) := caps {
    not allow_capability_dropping

    # no dropping allowed, so we just return the input
    caps := input.capabilities
}
   397
# Requested capabilities must match the allowed capabilities exactly (as
# unordered lists) in all five capability sets.
caps_ok(allowed_caps, requested_caps) {
    capsList_ok(allowed_caps.bounding, requested_caps.bounding)
    capsList_ok(allowed_caps.effective, requested_caps.effective)
    capsList_ok(allowed_caps.inheritable, requested_caps.inheritable)
    capsList_ok(allowed_caps.permitted, requested_caps.permitted)
    capsList_ok(allowed_caps.ambient, requested_caps.ambient)
}
   405
# Resolve the capability sets to enforce for `container` at the given
# privilege level. Explicitly specified capabilities always win.
get_capabilities(container, privileged) := capabilities {
    container.capabilities != null
    capabilities := container.capabilities
}

# Default capability sets for privileged containers, built from
# data.defaultPrivilegedCapabilities: the same set is used for bounding,
# effective, inheritable and permitted; ambient is empty.
default_privileged_capabilities := capabilities {
    caps := {cap | cap := data.defaultPrivilegedCapabilities[_]}
    capabilities := {
        "bounding": caps,
        "effective": caps,
        "inheritable": caps,
        "permitted": caps,
        "ambient": set(),
    }
}

# No explicit capabilities + privileged request + elevation allowed:
# privileged defaults apply.
get_capabilities(container, true) := capabilities {
    container.capabilities == null
    container.allow_elevated
    capabilities := default_privileged_capabilities
}

# Default capability sets for unprivileged containers, built from
# data.defaultUnprivilegedCapabilities: inheritable and ambient are empty.
default_unprivileged_capabilities := capabilities {
    caps := {cap | cap := data.defaultUnprivilegedCapabilities[_]}
    capabilities := {
        "bounding": caps,
        "effective": caps,
        "inheritable": set(),
        "permitted": caps,
        "ambient": set(),
    }
}

# No explicit capabilities + unprivileged request (even though elevation is
# allowed): unprivileged defaults apply.
get_capabilities(container, false) := capabilities {
    container.capabilities == null
    container.allow_elevated
    capabilities := default_unprivileged_capabilities
}

# No explicit capabilities and elevation not allowed: unprivileged defaults
# apply regardless of the requested privilege level.
get_capabilities(container, privileged) := capabilities {
    container.capabilities == null
    not container.allow_elevated
    capabilities := default_unprivileged_capabilities
}
   450
default create_container := {"allowed": false}

# Allow starting a container only when it has not already been started and
# at least one candidate recorded by mount_overlay ("matches") survives
# every narrowing step below. Returns the (possibly reduced) env and caps
# lists to apply, plus metadata updates that narrow "matches" and mark the
# container as started (remembering its privilege level).
create_container := {"metadata": [updateMatches, addStarted],
                     "env_list": env_list,
                     "caps_list": caps_list,
                     "allow_stdio_access": allow_stdio_access,
                     "allowed": true} {
    not container_started

    # narrow the matches based upon command, working directory, and
    # mount list
    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        # NB any change to these narrowing conditions should be reflected in
        # the error handling, such that error messaging correctly reflects
        # the narrowing process.
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        privileged_ok(container.allow_elevated)
        workingDirectory_ok(container.working_dir)
        command_ok(container.command)
        mountList_ok(container.mounts, container.allow_elevated)
        seccomp_ok(container.seccomp_profile_sha256)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # check to see if the capabilities variables match, dropping
    # them if allowed (and necessary)
    caps_list := valid_caps_for_all(possible_after_env_containers, input.privileged)
    possible_after_caps_containers := [container |
        container := possible_after_env_containers[_]
        caps_ok(get_capabilities(container, input.privileged), caps_list)
    ]

    count(possible_after_caps_containers) > 0

    # set final container list
    containers := possible_after_caps_containers

    # we can't do narrowing based on allowing stdio access so at this point
    # every container from the policy that might match this create request
    # must have the same allow stdio value otherwise, we are in an undecidable
    # state
    allow_stdio_access := containers[0].allow_stdio_access
    every c in containers {
        c.allow_stdio_access == allow_stdio_access
    }

    updateMatches := {
        "name": "matches",
        "action": "update",
        "key": input.containerID,
        "value": containers,
    }

    addStarted := {
        "name": "started",
        "action": "add",
        "key": input.containerID,
        "value": {
            "privileged": input.privileged,
        },
    }
}
   526
# A mount source satisfies a constraint in one of four independent ways:

# 1) sandbox-relative constraint: substitute the sandbox directory for the
#    prefix and treat the result as a regular expression.
mountSource_ok(constraint, source) {
    startswith(constraint, data.sandboxPrefix)
    newConstraint := replace(constraint, data.sandboxPrefix, input.sandboxDir)
    regex.match(newConstraint, source)
}

# 2) huge-pages-relative constraint, handled the same way with hugePagesDir.
mountSource_ok(constraint, source) {
    startswith(constraint, data.hugePagesPrefix)
    newConstraint := replace(constraint, data.hugePagesPrefix, input.hugePagesDir)
    regex.match(newConstraint, source)
}

# 3) plan9 constraint: the source must be a plan9 target previously mounted
#    for this very container (recorded in data.metadata.p9mounts).
mountSource_ok(constraint, source) {
    startswith(constraint, data.plan9Prefix)
    some target, containerID in data.metadata.p9mounts
    source == target
    input.containerID == containerID
}

# 4) exact string equality always suffices.
mountSource_ok(constraint, source) {
    constraint == source
}
   549
# A requested mount matches a constraint when the type and (non-empty)
# destination agree, the source is acceptable per mountSource_ok, and the
# option lists contain exactly the same options (order-insensitive).
mountConstraint_ok(constraint, mount) {
    mount.type == constraint.type
    mountSource_ok(constraint.source, mount.source)
    mount.destination != ""
    mount.destination == constraint.destination

    # the following check is not required (as the following tests will prove this
    # condition as well), however it will check whether those more expensive
    # tests need to be performed.
    count(mount.options) == count(constraint.options)
    every option in mount.options {
        some constraintOption in constraint.options
        option == constraintOption
    }

    every option in constraint.options {
        some mountOption in mount.options
        option == mountOption
    }
}
   570
# A mount is allowed when it satisfies one of the container's own mount
# constraints...
mount_ok(mounts, allow_elevated, mount) {
    some constraint in mounts
    mountConstraint_ok(constraint, mount)
}

# ...or one of the globally allowed default mounts...
mount_ok(mounts, allow_elevated, mount) {
    some constraint in data.defaultMounts
    mountConstraint_ok(constraint, mount)
}

# ...or, for containers that may run elevated, one of the privileged mounts.
mount_ok(mounts, allow_elevated, mount) {
    allow_elevated
    some constraint in data.privilegedMounts
    mountConstraint_ok(constraint, mount)
}
   586
   587mountList_ok(mounts, allow_elevated) {
   588    every mount in input.mounts {
   589        mount_ok(mounts, allow_elevated, mount)
   590    }
   591}
   592
default exec_in_container := {"allowed": false}

# Allow exec'ing a process in a started container when at least one matched
# container admits the requested process. Mirrors the narrowing performed
# by create_container (using the recorded privilege level) and updates
# "matches" with the surviving candidates.
exec_in_container := {"metadata": [updateMatches],
                      "env_list": env_list,
                      "caps_list": caps_list,
                      "allowed": true} {
    container_started

    # narrow our matches based upon the process requested
    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        # NB any change to these narrowing conditions should be reflected in
        # the error handling, such that error messaging correctly reflects
        # the narrowing process.
        workingDirectory_ok(container.working_dir)
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        some process in container.exec_processes
        command_ok(process.command)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # check to see if the capabilities variables match, dropping
    # them if allowed (and necessary)
    caps_list := valid_caps_for_all(possible_after_env_containers, container_privileged)
    possible_after_caps_containers := [container |
        container := possible_after_env_containers[_]
        caps_ok(get_capabilities(container, container_privileged), caps_list)
    ]

    count(possible_after_caps_containers) > 0

    # set final container list
    containers := possible_after_caps_containers

    updateMatches := {
        "name": "matches",
        "action": "update",
        "key": input.containerID,
        "value": containers,
    }
}
   646
default shutdown_container := {"allowed": false}

# Allow shutting down a container that has been started; removes its entry
# from the "matches" metadata table.
# NOTE(review): the result exposes the same `remove` operation under both
# "started" and "metadata" keys — confirm the extra "started" key is
# intentional (other rules in this file only return "metadata"). Also note
# nothing here removes the container's entry from the "started" table.
shutdown_container := {"started": remove, "metadata": [remove], "allowed": true} {
    container_started
    remove := {
        "name": "matches",
        "action": "remove",
        "key": input.containerID,
    }
}
   657
default signal_container_process := {"allowed": false}

# Signals to the init process: allowed when some matched container lists the
# signal in its top-level signals list; "matches" is narrowed accordingly.
signal_container_process := {"metadata": [updateMatches], "allowed": true} {
    container_started
    input.isInitProcess
    containers := [container |
        container := data.metadata.matches[input.containerID][_]
        signal_ok(container.signals)
    ]

    count(containers) > 0
    updateMatches := {
        "name": "matches",
        "action": "update",
        "key": input.containerID,
        "value": containers,
    }
}

# Signals to an exec'd process: the process is identified by its command and
# the signal must be listed on that process entry.
signal_container_process := {"metadata": [updateMatches], "allowed": true} {
    container_started
    not input.isInitProcess
    containers := [container |
        container := data.metadata.matches[input.containerID][_]
        some process in container.exec_processes
        command_ok(process.command)
        signal_ok(process.signals)
    ]

    count(containers) > 0
    updateMatches := {
        "name": "matches",
        "action": "update",
        "key": input.containerID,
        "value": containers,
    }
}
   695
   696signal_ok(signals) {
   697    some signal in signals
   698    input.signal == signal
   699}
   700
# True when `target` has an active plan9 mount recorded in the "p9mounts"
# metadata table (the stored value is a container ID string).
plan9_mounted(target) {
    data.metadata.p9mounts[target]
}
   704
default plan9_mount := {"allowed": false}

# Allow a plan9 mount when the target is not already mounted and matches the
# per-container path pattern <rootPrefix>/<containerID><mountPathPrefix> for
# some container currently in "matches". On success the target -> containerID
# mapping is recorded in "p9mounts".
plan9_mount := {"metadata": [addPlan9Target], "allowed": true} {
    not plan9_mounted(input.target)
    some containerID, _ in data.metadata.matches
    pattern := concat("", [input.rootPrefix, "/", containerID, input.mountPathPrefix])
    regex.match(pattern, input.target)
    addPlan9Target := {
        "name": "p9mounts",
        "action": "add",
        "key": input.target,
        "value": containerID,
    }
}
   719
   720default plan9_unmount := {"allowed": false}
   721
   722plan9_unmount := {"metadata": [removePlan9Target], "allowed": true} {
   723    plan9_mounted(input.unmountTarget)
   724    removePlan9Target := {
   725        "name": "p9mounts",
   726        "action": "remove",
   727        "key": input.unmountTarget,
   728    }
   729}
   730
   731
# Describes whether the enforcement point named by input.name is usable:
#   unknown         - name not present in data.api.enforcement_points
#   version_missing - the policy declares no API version
#   invalid         - the enforcement point was introduced after
#                     data.api.version (inconsistent API data)
#   available       - the policy's API version is at least the version the
#                     enforcement point was introduced in; when not
#                     available, default_results apply.
default enforcement_point_info := {"available": false, "default_results": {"allow": false}, "unknown": true, "invalid": false, "version_missing": false}

enforcement_point_info := {"available": false, "default_results": {"allow": false}, "unknown": false, "invalid": false, "version_missing": true} {
    policy_api_version == null
}

enforcement_point_info := {"available": available, "default_results": default_results, "unknown": false, "invalid": false, "version_missing": false} {
    enforcement_point := data.api.enforcement_points[input.name]
    semver.compare(data.api.version, enforcement_point.introducedVersion) >= 0
    available := semver.compare(policy_api_version, enforcement_point.introducedVersion) >= 0
    default_results := enforcement_point.default_results
}

enforcement_point_info := {"available": false, "default_results": {"allow": false}, "unknown": false, "invalid": true, "version_missing": false} {
    enforcement_point := data.api.enforcement_points[input.name]
    semver.compare(data.api.version, enforcement_point.introducedVersion) < 0
}
   749
   750default candidate_external_processes := []
   751
   752candidate_external_processes := external_processes {
   753    semver.compare(policy_framework_version, version) == 0
   754
   755    policy_external_processes := [e | e := data.policy.external_processes[_]]
   756    fragment_external_processes := [e |
   757        feed := data.metadata.issuers[_].feeds[_]
   758        fragment := feed[_]
   759        e := fragment.external_processes[_]
   760    ]
   761
   762    external_processes := array.concat(policy_external_processes, fragment_external_processes)
   763}
   764
   765candidate_external_processes := external_processes {
   766    semver.compare(policy_framework_version, version) < 0
   767
   768    policy_external_processes := apply_defaults("external_process", data.policy.external_processes, policy_framework_version)
   769    fragment_external_processes := [e |
   770        feed := data.metadata.issuers[_].feeds[_]
   771        fragment := feed[_]
   772        e := fragment.external_processes[_]
   773    ]
   774
   775    external_processes := array.concat(policy_external_processes, fragment_external_processes)
   776}
   777
# An external process matches when its command, environment rules, and
# working directory all accept the request.
external_process_ok(process) {
    command_ok(process.command)
    envList_ok(process.env_rules, input.envList)
    workingDirectory_ok(process.working_dir)
}
   783
default exec_external := {"allowed": false}

# Allow running an external (non-container) process when some candidate
# external process matches the working directory and command; environment
# variables may be dropped when the policy permits it. All surviving
# candidates must agree on allow_stdio_access.
exec_external := {"allowed": true,
                  "allow_stdio_access": allow_stdio_access,
                  "env_list": env_list} {
    possible_processes := [process |
        process := candidate_external_processes[_]
        # NB any change to these narrowing conditions should be reflected in
        # the error handling, such that error messaging correctly reflects
        # the narrowing process.
        workingDirectory_ok(process.working_dir)
        command_ok(process.command)
    ]

    count(possible_processes) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_processes)
    processes := [process |
        process := possible_processes[_]
        envList_ok(process.env_rules, env_list)
    ]

    count(processes) > 0

    allow_stdio_access := processes[0].allow_stdio_access
    every p in processes {
        p.allow_stdio_access == allow_stdio_access
    }
}
   815
default get_properties := {"allowed": false}

# Property access is gated on the policy's allow_properties_access flag.
get_properties := {"allowed": true} {
    allow_properties_access
}

default dump_stacks := {"allowed": false}

# Stack dumping is gated on the policy's allow_dump_stacks flag.
dump_stacks := {"allowed": true} {
    allow_dump_stacks
}

default runtime_logging := {"allowed": false}

# Runtime logging is gated on the policy's allow_runtime_logging flag.
runtime_logging := {"allowed": true} {
    allow_runtime_logging
}
   833
# Accessors for the collections a fragment publishes under its own namespace
# (data[input.namespace]); each defaults to an empty list when the fragment
# does not define the collection.
default fragment_containers := []

fragment_containers := data[input.namespace].containers

default fragment_fragments := []

fragment_fragments := data[input.namespace].fragments

default fragment_external_processes := []

fragment_external_processes := data[input.namespace].external_processes
   845
# Normalize raw policy/fragment objects against the current framework
# version. When the declaring version equals the current one the values are
# used unchanged; for older versions each element is passed through the
# matching check_* helper (defined elsewhere in this file, not visible in
# this excerpt).
apply_defaults(name, raw_values, framework_version) := values {
    semver.compare(framework_version, version) == 0
    values := raw_values
}

apply_defaults("container", raw_values, framework_version) := values {
    semver.compare(framework_version, version) < 0
    values := [checked |
        raw := raw_values[_]
        checked := check_container(raw, framework_version)
    ]
}

apply_defaults("external_process", raw_values, framework_version) := values {
    semver.compare(framework_version, version) < 0
    values := [checked |
        raw := raw_values[_]
        checked := check_external_process(raw, framework_version)
    ]
}

apply_defaults("fragment", raw_values, framework_version) := values {
    semver.compare(framework_version, version) < 0
    values := [checked |
        raw := raw_values[_]
        checked := check_fragment(raw, framework_version)
    ]
}
   874
# The framework version the loaded fragment declares for itself; null when
# the fragment's namespace does not define one.
default fragment_framework_version := null
fragment_framework_version := data[input.namespace].framework_version
   877
# Build the subset of a fragment's content selected by `includes` (keys of
# `objects` below), normalizing each collection to the current framework
# schema via apply_defaults using the fragment's declared version.
extract_fragment_includes(includes) := fragment {
    framework_version := fragment_framework_version
    objects := {
        "containers": apply_defaults("container", fragment_containers, framework_version),
        "fragments": apply_defaults("fragment", fragment_fragments, framework_version),
        "external_processes": apply_defaults("external_process", fragment_external_processes, framework_version)
    }

    fragment := {
        include: objects[include] | include := includes[_]
    }
}
   890
# True when the issuer has already been recorded in metadata.
issuer_exists(iss) {
    data.metadata.issuers[iss]
}

# True when the given feed has been recorded for the issuer.
feed_exists(iss, feed) {
    data.metadata.issuers[iss].feeds[feed]
}
   898
# Merge a newly loaded fragment's includes into the issuer/feed bookkeeping
# under data.metadata.issuers. Three cases:

# 1) issuer and feed already known: prepend the new fragment to the feed's
#    existing fragment list.
update_issuer(includes) := issuer {
    feed_exists(input.issuer, input.feed)
    old_issuer := data.metadata.issuers[input.issuer]
    old_fragments := old_issuer.feeds[input.feed]
    new_issuer := {"feeds": {input.feed: array.concat([extract_fragment_includes(includes)], old_fragments)}}

    issuer := object.union(old_issuer, new_issuer)
}

# 2) issuer known but feed not yet seen: add a fresh single-fragment feed.
update_issuer(includes) := issuer {
    not feed_exists(input.issuer, input.feed)
    old_issuer := data.metadata.issuers[input.issuer]
    new_issuer := {"feeds": {input.feed: [extract_fragment_includes(includes)]}}

    issuer := object.union(old_issuer, new_issuer)
}

# 3) issuer not seen before: create it with this single feed and fragment.
update_issuer(includes) := issuer {
    not issuer_exists(input.issuer)
    issuer := {"feeds": {input.feed: [extract_fragment_includes(includes)]}}
}
   920
default candidate_fragments := []

# All fragments that may legally be loaded: those listed in the policy plus
# those contributed by already-loaded fragments.
# Case 1: policy is at the current framework version — use its fragments as-is.
candidate_fragments := fragments {
    semver.compare(policy_framework_version, version) == 0

    policy_fragments := [f | f := data.policy.fragments[_]]
    fragment_fragments := [f |
        feed := data.metadata.issuers[_].feeds[_]
        fragment := feed[_]
        f := fragment.fragments[_]
    ]

    fragments := array.concat(policy_fragments, fragment_fragments)
}

# Case 2: policy is older — upgrade its fragments to the current data format
# before combining with fragment-contributed fragments.
candidate_fragments := fragments {
    semver.compare(policy_framework_version, version) < 0

    policy_fragments := apply_defaults("fragment", data.policy.fragments, policy_framework_version)
    fragment_fragments := [f |
        feed := data.metadata.issuers[_].feeds[_]
        fragment := feed[_]
        f := fragment.fragments[_]
    ]

    fragments := array.concat(policy_fragments, fragment_fragments)
}
   948
default load_fragment := {"allowed": false}

# svn comparison when both sides are semantic version strings.
svn_ok(svn, minimum_svn) {
    # deprecated
    semver.is_valid(svn)
    semver.is_valid(minimum_svn)
    semver.compare(svn, minimum_svn) >= 0
}

# svn comparison when both sides are numeric strings.
svn_ok(svn, minimum_svn) {
    to_number(svn) >= to_number(minimum_svn)
}

# A candidate fragment authorizes the incoming fragment when issuer, feed and
# minimum svn all check out.
fragment_ok(fragment) {
    input.issuer == fragment.issuer
    input.feed == fragment.feed
    svn_ok(data[input.namespace].svn, fragment.minimum_svn)
}

# Allow loading a fragment that matches some candidate; record its includes
# under the issuer metadata, and tell the caller whether to add the fragment's
# Rego module (only when its includes contain "namespace").
load_fragment := {"metadata": [updateIssuer], "add_module": add_module, "allowed": true} {
    some fragment in candidate_fragments
    fragment_ok(fragment)

    issuer := update_issuer(fragment.includes)
    updateIssuer := {
        "name": "issuers",
        "action": "update",
        "key": input.issuer,
        "value": issuer,
    }

    add_module := "namespace" in fragment.includes
}
   982
default scratch_mount := {"allowed": false}

# True when a scratch mount is already recorded at target.
scratch_mounted(target) {
    data.metadata.scratch_mounts[target]
}

# Case 1: policy allows unencrypted scratch — any new scratch mount is OK.
scratch_mount := {"metadata": [add_scratch_mount], "allowed": true} {
    not scratch_mounted(input.target)
    allow_unencrypted_scratch
    add_scratch_mount := {
        "name": "scratch_mounts",
        "action": "add",
        "key": input.target,
        "value": {"encrypted": input.encrypted},
    }
}

# Case 2: unencrypted scratch is disallowed — the mount must be encrypted.
scratch_mount := {"metadata": [add_scratch_mount], "allowed": true} {
    not scratch_mounted(input.target)
    not allow_unencrypted_scratch
    input.encrypted
    add_scratch_mount := {
        "name": "scratch_mounts",
        "action": "add",
        "key": input.target,
        "value": {"encrypted": input.encrypted},
    }
}
  1011
  1012default scratch_unmount := {"allowed": false}
  1013
  1014scratch_unmount := {"metadata": [remove_scratch_mount], "allowed": true} {
  1015    scratch_mounted(input.unmountTarget)
  1016    remove_scratch_mount := {
  1017        "name": "scratch_mounts",
  1018        "action": "remove",
  1019        "key": input.unmountTarget,
  1020    }
  1021}
  1022
# Decision payload returned alongside a denial: human-readable error strings
# plus the objects that were considered while evaluating the rule.
reason := {
    "errors": errors,
    "error_objects": error_objects
}
  1027
  1028################################################################
  1029# Error messages
  1030################################################################
  1031
# Device, container lifecycle, and overlay error messages.
errors["deviceHash not found"] {
    input.rule == "mount_device"
    not deviceHash_ok
}

errors["device already mounted at path"] {
    input.rule == "mount_device"
    device_mounted(input.target)
}

errors["no device at path to unmount"] {
    input.rule == "unmount_device"
    not device_mounted(input.unmountTarget)
}

errors["container already started"] {
    input.rule == "create_container"
    container_started
}

errors["container not started"] {
    input.rule in ["exec_in_container", "shutdown_container", "signal_container_process"]
    not container_started
}

errors["overlay has already been mounted"] {
    input.rule == "mount_overlay"
    overlay_exists
}
  1061
  1062default overlay_matches := false
  1063
  1064overlay_matches {
  1065    some container in candidate_containers
  1066    layerPaths_ok(container.layers)
  1067}
  1068
errors["no overlay at path to unmount"] {
    input.rule == "unmount_overlay"
    not overlay_mounted(input.unmountTarget)
}

errors["no matching containers for overlay"] {
    input.rule == "mount_overlay"
    not overlay_matches
}

default privileged_matches := false

# Some matched container tolerates the requested privilege level.
privileged_matches {
    input.rule == "create_container"
    some container in data.metadata.matches[input.containerID]
    privileged_ok(container.allow_elevated)
}

errors["privileged escalation not allowed"] {
    input.rule in ["create_container"]
    not privileged_matches
}
  1091
default command_matches := false

# Some matched container allows the incoming create_container command.
command_matches {
    input.rule == "create_container"
    some container in data.metadata.matches[input.containerID]
    command_ok(container.command)
}

# Some exec process of a matched container allows the incoming command.
command_matches {
    input.rule == "exec_in_container"
    some container in data.metadata.matches[input.containerID]
    some process in container.exec_processes
    command_ok(process.command)
}

# Some candidate external process allows the incoming command.
command_matches {
    input.rule == "exec_external"
    some process in candidate_external_processes
    command_ok(process.command)
}

errors["invalid command"] {
    input.rule in ["create_container", "exec_in_container", "exec_external"]
    not command_matches
}
  1117
# env is accepted by some env rule of some matched container.
env_matches(env) {
    input.rule in ["create_container", "exec_in_container"]
    some container in data.metadata.matches[input.containerID]
    some rule in container.env_rules
    env_ok(rule.pattern, rule.strategy, env)
}

# env is accepted by some env rule of some candidate external process.
env_matches(env) {
    input.rule in ["exec_external"]
    some process in candidate_external_processes
    some rule in process.env_rules
    env_ok(rule.pattern, rule.strategy, env)
}
  1131
  1132errors[envError] {
  1133    input.rule in ["create_container", "exec_in_container", "exec_external"]
  1134    bad_envs := [invalid |
  1135        env := input.envList[_]
  1136        not env_matches(env)
  1137        parts := split(env, "=")
  1138        invalid = parts[0]
  1139    ]
  1140
  1141    count(bad_envs) > 0
  1142    envError := concat(" ", ["invalid env list:", concat(",", bad_envs)])
  1143}
  1144
# rule is satisfied by at least one variable in the incoming env list.
env_rule_matches(rule) {
    some env in input.envList
    env_ok(rule.pattern, rule.strategy, env)
}
  1149
# create_container: a container otherwise matches but at least one of its
# required env rules has no matching variable in input.envList.
errors["missing required environment variable"] {
    input.rule == "create_container"

    not container_started
    # containers that survive all non-env narrowing conditions
    possible_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        privileged_ok(container.allow_elevated)
        workingDirectory_ok(container.working_dir)
        command_ok(container.command)
        mountList_ok(container.mounts, container.allow_elevated)
    ]

    count(possible_containers) > 0

    # keep containers that have a required-but-unmatched env rule
    containers := [container |
        container := possible_containers[_]
        missing_rules := {invalid |
            invalid := {rule |
                rule := container.env_rules[_]
                rule.required
                not env_rule_matches(rule)
            }
            count(invalid) > 0
        }
        count(missing_rules) > 0
    ]

    count(containers) > 0
}

# exec_in_container: same check against the matched containers' exec processes.
errors["missing required environment variable"] {
    input.rule == "exec_in_container"

    container_started
    possible_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        workingDirectory_ok(container.working_dir)
        some process in container.exec_processes
        command_ok(process.command)
    ]

    count(possible_containers) > 0

    # keep containers that have a required-but-unmatched env rule
    containers := [container |
        container := possible_containers[_]
        missing_rules := {invalid |
            invalid := {rule |
                rule := container.env_rules[_]
                rule.required
                not env_rule_matches(rule)
            }
            count(invalid) > 0
        }
        count(missing_rules) > 0
    ]

    count(containers) > 0
}

# exec_external: same check against candidate external processes.
errors["missing required environment variable"] {
    input.rule == "exec_external"

    possible_processes := [process |
        process := candidate_external_processes[_]
        workingDirectory_ok(process.working_dir)
        command_ok(process.command)
    ]

    count(possible_processes) > 0

    # keep processes that have a required-but-unmatched env rule
    processes := [process |
        process := possible_processes[_]
        missing_rules := {invalid |
            invalid := {rule |
                rule := process.env_rules[_]
                rule.required
                not env_rule_matches(rule)
            }
            count(invalid) > 0
        }
        count(missing_rules) > 0
    ]

    count(processes) > 0
}
  1239
default workingDirectory_matches := false

# Some matched container accepts the incoming working directory.
workingDirectory_matches {
    input.rule in ["create_container", "exec_in_container"]
    some container in data.metadata.matches[input.containerID]
    workingDirectory_ok(container.working_dir)
}

# Some candidate external process accepts the incoming working directory.
workingDirectory_matches {
    input.rule == "exec_external"
    some process in candidate_external_processes
    workingDirectory_ok(process.working_dir)
}

errors["invalid working directory"] {
    input.rule in ["create_container", "exec_in_container", "exec_external"]
    not workingDirectory_matches
}
  1258
# mount is accepted by some matched container's mount rules.
mount_matches(mount) {
    some container in data.metadata.matches[input.containerID]
    mount_ok(container.mounts, container.allow_elevated, mount)
}

# Report the destinations of requested mounts no container accepts.
errors[mountError] {
    input.rule == "create_container"
    bad_mounts := [mount.destination |
        mount := input.mounts[_]
        not mount_matches(mount)
    ]

    count(bad_mounts) > 0
    mountError := concat(" ", ["invalid mount list:", concat(",", bad_mounts)])
}
  1274
default signal_allowed := false

# The signal targets the container's init process and is listed in the
# container's allowed signals.
signal_allowed {
    some container in data.metadata.matches[input.containerID]
    signal_ok(container.signals)
}

# The signal targets an exec process whose command matches and which lists
# the signal as allowed.
signal_allowed {
    some container in data.metadata.matches[input.containerID]
    some process in container.exec_processes
    command_ok(process.command)
    signal_ok(process.signals)
}

errors["target isn't allowed to receive the signal"] {
    input.rule == "signal_container_process"
    not signal_allowed
}

errors["device already mounted at path"] {
    input.rule == "plan9_mount"
    plan9_mounted(input.target)
}

errors["no device at path to unmount"] {
    input.rule == "plan9_unmount"
    not plan9_mounted(input.unmountTarget)
}
  1303
default fragment_issuer_matches := false

# Some candidate fragment is published by the incoming issuer.
fragment_issuer_matches {
    some fragment in candidate_fragments
    fragment.issuer == input.issuer
}

errors["invalid fragment issuer"] {
    input.rule == "load_fragment"
    not fragment_issuer_matches
}

default fragment_feed_matches := false

# Some candidate fragment matches both the incoming issuer and feed.
fragment_feed_matches {
    some fragment in candidate_fragments
    fragment.issuer == input.issuer
    fragment.feed == input.feed
}
  1323
  1324fragment_feed_matches {
  1325    input.feed in data.metadata.issuers[input.issuer]
  1326}
  1327
errors["invalid fragment feed"] {
    input.rule == "load_fragment"
    fragment_issuer_matches
    not fragment_feed_matches
}

default fragment_version_is_valid := false

# Some candidate fragment with matching issuer/feed also satisfies the
# minimum svn requirement.
fragment_version_is_valid {
    some fragment in candidate_fragments
    fragment.issuer == input.issuer
    fragment.feed == input.feed
    svn_ok(data[input.namespace].svn, fragment.minimum_svn)
}

default svn_mismatch := false

# Incoming svn is numeric but the candidate's minimum is a semver string.
svn_mismatch {
    some fragment in candidate_fragments
    fragment.issuer == input.issuer
    fragment.feed == input.feed
    to_number(data[input.namespace].svn)
    semver.is_valid(fragment.minimum_svn)
}

# Incoming svn is a semver string but the candidate's minimum is numeric.
svn_mismatch {
    some fragment in candidate_fragments
    fragment.issuer == input.issuer
    fragment.feed == input.feed
    semver.is_valid(data[input.namespace].svn)
    to_number(fragment.minimum_svn)
}

errors["fragment svn is below the specified minimum"] {
    input.rule == "load_fragment"
    fragment_feed_matches
    not svn_mismatch
    not fragment_version_is_valid
}

errors["fragment svn and the specified minimum are different types"] {
    input.rule == "load_fragment"
    fragment_feed_matches
    svn_mismatch
}
  1373
errors["scratch already mounted at path"] {
    input.rule == "scratch_mount"
    scratch_mounted(input.target)
}

errors["unencrypted scratch not allowed"] {
    input.rule == "scratch_mount"
    not allow_unencrypted_scratch
    not input.encrypted
}

errors["no scratch at path to unmount"] {
    input.rule == "scratch_unmount"
    not scratch_mounted(input.unmountTarget)
}

# The policy declared no framework version (neither framework_version nor the
# deprecated framework_svn).
errors[framework_version_error] {
    policy_framework_version == null
    framework_version_error := concat(" ", ["framework_version is missing. Current version:", version])
}

# The policy targets a newer framework than this one.
errors[framework_version_error] {
    semver.compare(policy_framework_version, version) > 0
    framework_version_error := concat(" ", ["framework_version is ahead of the current version:", policy_framework_version, "is greater than", version])
}

# The fragment being loaded declared no framework version.
errors[fragment_framework_version_error] {
    input.namespace
    fragment_framework_version == null
    fragment_framework_version_error := concat(" ", ["fragment framework_version is missing. Current version:", version])
}

# The fragment being loaded targets a newer framework than this one.
errors[fragment_framework_version_error] {
    input.namespace
    semver.compare(fragment_framework_version, version) > 0
    fragment_framework_version_error := concat(" ", ["fragment framework_version is ahead of the current version:", fragment_framework_version, "is greater than", version])
}
  1411
# After all narrowing, the remaining candidate containers disagree on
# allow_stdio_access, so the decision would be ambiguous.
errors["containers only distinguishable by allow_stdio_access"] {
    input.rule == "create_container"

    not container_started
    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        privileged_ok(container.allow_elevated)
        workingDirectory_ok(container.working_dir)
        command_ok(container.command)
        mountList_ok(container.mounts, container.allow_elevated)
        seccomp_ok(container.seccomp_profile_sha256)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # check to see if the capabilities variables match, dropping
    # them if allowed (and necessary)
    caps_list := valid_caps_for_all(possible_after_env_containers, input.privileged)
    possible_after_caps_containers := [container |
        container := possible_after_env_containers[_]
        caps_ok(get_capabilities(container, input.privileged), caps_list)
    ]

    count(possible_after_caps_containers) > 0

    # set final container list
    containers := possible_after_caps_containers

    # ambiguous when any survivor disagrees with the first survivor
    allow_stdio_access := containers[0].allow_stdio_access
    some c in containers
    c.allow_stdio_access != allow_stdio_access
}

# Same ambiguity check for external processes.
errors["external processes only distinguishable by allow_stdio_access"] {
    input.rule == "exec_external"

    possible_processes := [process |
        process := candidate_external_processes[_]
        workingDirectory_ok(process.working_dir)
        command_ok(process.command)
    ]

    count(possible_processes) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_processes)
    processes := [process |
        process := possible_processes[_]
        envList_ok(process.env_rules, env_list)
    ]

    count(processes) > 0

    allow_stdio_access := processes[0].allow_stdio_access
    some p in processes
    p.allow_stdio_access != allow_stdio_access
}
  1482
  1483
default noNewPrivileges_matches := false

# Some matched container accepts the incoming no_new_privileges setting.
noNewPrivileges_matches {
    input.rule == "create_container"
    some container in data.metadata.matches[input.containerID]
    noNewPrivileges_ok(container.no_new_privileges)
}

# Some exec process (with matching command and working dir) accepts the
# incoming no_new_privileges setting.
noNewPrivileges_matches {
    input.rule == "exec_in_container"
    some container in data.metadata.matches[input.containerID]
    some process in container.exec_processes
    command_ok(process.command)
    workingDirectory_ok(process.working_dir)
    noNewPrivileges_ok(process.no_new_privileges)
}

errors["invalid noNewPrivileges"] {
    input.rule in ["create_container", "exec_in_container"]
    not noNewPrivileges_matches
}

default user_matches := false

# Some matched container accepts the incoming user specification.
user_matches {
    input.rule == "create_container"
    some container in data.metadata.matches[input.containerID]
    user_ok(container.user)
}

# Some exec process (with matching command and working dir) accepts the
# incoming user specification.
user_matches {
    input.rule == "exec_in_container"
    some container in data.metadata.matches[input.containerID]
    some process in container.exec_processes
    command_ok(process.command)
    workingDirectory_ok(process.working_dir)
    user_ok(process.user)
}

errors["invalid user"] {
    input.rule in ["create_container", "exec_in_container"]
    not user_matches
}
  1527
# create_container: containers survive every narrowing step except the
# capability check.
errors["capabilities don't match"] {
    input.rule == "create_container"

    not container_started

    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        privileged_ok(container.allow_elevated)
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        workingDirectory_ok(container.working_dir)
        command_ok(container.command)
        mountList_ok(container.mounts, container.allow_elevated)
        seccomp_ok(container.seccomp_profile_sha256)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # check to see if the capabilities variables match, dropping
    # them if allowed (and necessary)
    caps_list := valid_caps_for_all(possible_after_env_containers, input.privileged)
    possible_after_caps_containers := [container |
        container := possible_after_env_containers[_]
        caps_ok(get_capabilities(container, input.privileged), caps_list)
    ]

    # no container survives the capability check — that is the failure
    count(possible_after_caps_containers) == 0
}

# exec_in_container: same structure, but privilege comes from the running
# container (container_privileged) rather than the request.
errors["capabilities don't match"] {
    input.rule == "exec_in_container"

    container_started

    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        workingDirectory_ok(container.working_dir)
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        some process in container.exec_processes
        command_ok(process.command)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # check to see if the capabilities variables match, dropping
    # them if allowed (and necessary)
    caps_list := valid_caps_for_all(possible_after_env_containers, container_privileged)
    possible_after_caps_containers := [container |
        container := possible_after_env_containers[_]
        caps_ok(get_capabilities(container, container_privileged), caps_list)
    ]

    count(possible_after_caps_containers) == 0
}
  1603
# covers exec_in_container as well. it shouldn't be possible to ever get
# an exec_in_container as it "inherits" capabilities rules from create_container
# NOTE(review): the error key preserves the historical "capabilties" spelling;
# callers may match on this string, so do not change it without auditing them.
errors["containers only distinguishable by capabilties"] {
    input.rule == "create_container"

    allow_capability_dropping
    not container_started

    # narrow the matches based upon command, working directory, and
    # mount list
    possible_after_initial_containers := [container |
        container := data.metadata.matches[input.containerID][_]
        # NB any change to these narrowing conditions should be reflected in
        # the error handling, such that error messaging correctly reflects
        # the narrowing process.
        noNewPrivileges_ok(container.no_new_privileges)
        user_ok(container.user)
        privileged_ok(container.allow_elevated)
        workingDirectory_ok(container.working_dir)
        command_ok(container.command)
        mountList_ok(container.mounts, container.allow_elevated)
    ]

    count(possible_after_initial_containers) > 0

    # check to see if the environment variables match, dropping
    # them if allowed (and necessary)
    env_list := valid_envs_for_all(possible_after_initial_containers)
    possible_after_env_containers := [container |
        container := possible_after_initial_containers[_]
        envList_ok(container.env_rules, env_list)
    ]

    count(possible_after_env_containers) > 0

    # ambiguous when the survivors' largest capability sets differ
    largest := largest_caps_sets_for_all(possible_after_env_containers, input.privileged)
    not all_caps_sets_are_equal(largest)
}
  1642
default seccomp_matches := false

# Some matched container accepts the incoming seccomp profile hash.
seccomp_matches {
    input.rule == "create_container"
    some container in data.metadata.matches[input.containerID]
    seccomp_ok(container.seccomp_profile_sha256)
}

errors["invalid seccomp"] {
    input.rule == "create_container"
    not seccomp_matches
}
  1655
default error_objects := null

# Objects considered while evaluating the failed rule, included in `reason`
# to aid debugging.
error_objects := containers {
    input.rule == "create_container"
    containers := data.metadata.matches[input.containerID]
}

error_objects := processes {
    input.rule == "exec_in_container"
    processes := [process |
        container := data.metadata.matches[input.containerID][_]
        process := container.exec_processes[_]
    ]
}

error_objects := processes {
    input.rule == "exec_external"
    processes := candidate_external_processes
}

error_objects := fragments {
    input.rule == "load_fragment"
    fragments := candidate_fragments
}
  1680
  1681
  1682################################################################################
  1683# Logic for providing backwards compatibility for framework data objects
  1684################################################################################
  1685
  1686
# Pass a container through unchanged when it already uses the current
# framework data format.
check_container(raw_container, framework_version) := container {
    semver.compare(framework_version, version) == 0
    container := raw_container
}

# Upgrade a container from an older framework version, filling in defaults
# for fields that did not exist at that version.
check_container(raw_container, framework_version) := container {
    semver.compare(framework_version, version) < 0
    container := {
        # Base fields
        "command": raw_container.command,
        "env_rules": raw_container.env_rules,
        "layers": raw_container.layers,
        "mounts": raw_container.mounts,
        "allow_elevated": raw_container.allow_elevated,
        "working_dir": raw_container.working_dir,
        "exec_processes": raw_container.exec_processes,
        "signals": raw_container.signals,
        "allow_stdio_access": raw_container.allow_stdio_access,
        # Additional fields need to have default logic applied
        "no_new_privileges": check_no_new_privileges(raw_container, framework_version),
        "user": check_user(raw_container, framework_version),
        "capabilities": check_capabilities(raw_container, framework_version),
        "seccomp_profile_sha256": check_seccomp_profile_sha256(raw_container, framework_version),
    }
}
  1712
  1713check_no_new_privileges(raw_container, framework_version) := no_new_privileges {
  1714    semver.compare(framework_version, "0.2.0") >= 0
  1715    no_new_privileges := raw_container.no_new_privileges
  1716}
  1717
  1718check_no_new_privileges(raw_container, framework_version) := no_new_privileges {
  1719    semver.compare(framework_version, "0.2.0") < 0
  1720    no_new_privileges := false
  1721}
  1722
# user was introduced in framework 0.2.1.
check_user(raw_container, framework_version) := user {
    semver.compare(framework_version, "0.2.1") >= 0
    user := raw_container.user
}

# Containers from before 0.2.1 get a permissive default user specification.
check_user(raw_container, framework_version) := user {
    semver.compare(framework_version, "0.2.1") < 0
    user := {
        "umask": "0022",
        "user_idname": {
            "pattern": "",
            "strategy": "any"
        },
        "group_idnames": [
            {
                "pattern": "",
                "strategy": "any"
            }
        ]
    }
}

# capabilities were introduced in framework 0.2.2.
check_capabilities(raw_container, framework_version) := capabilities {
    semver.compare(framework_version, "0.2.2") >= 0
    capabilities := raw_container.capabilities
}

check_capabilities(raw_container, framework_version) := capabilities {
    semver.compare(framework_version, "0.2.2") < 0
    # we cannot determine a reasonable default at the time this is called,
    # which is either during `mount_overlay` or `load_fragment`, and so
    # we set it to `null`, which indicates that the capabilities should
    # be determined dynamically when needed.
    capabilities := null
}

# seccomp_profile_sha256 was introduced in framework 0.2.3.
check_seccomp_profile_sha256(raw_container, framework_version) := seccomp_profile_sha256 {
    semver.compare(framework_version, "0.2.3") >= 0
    seccomp_profile_sha256 := raw_container.seccomp_profile_sha256
}

# Containers from before 0.2.3 default to an empty profile hash.
check_seccomp_profile_sha256(raw_container, framework_version) := seccomp_profile_sha256 {
    semver.compare(framework_version, "0.2.3") < 0
    seccomp_profile_sha256 := ""
}
  1768
# Pass an external process through unchanged when it already uses the current
# framework data format.
check_external_process(raw_process, framework_version) := process {
    semver.compare(framework_version, version) == 0
    process := raw_process
}

# Upgrade an external process from an older framework version.
check_external_process(raw_process, framework_version) := process {
    semver.compare(framework_version, version) < 0
    process := {
        # Base fields
        "command": raw_process.command,
        "env_rules": raw_process.env_rules,
        "working_dir": raw_process.working_dir,
        "allow_stdio_access": raw_process.allow_stdio_access,
        # Additional fields need to have default logic applied
    }
}

# Pass a fragment through unchanged when it already uses the current
# framework data format.
check_fragment(raw_fragment, framework_version) := fragment {
    semver.compare(framework_version, version) == 0
    fragment := raw_fragment
}

# Upgrade a fragment from an older framework version.
check_fragment(raw_fragment, framework_version) := fragment {
    semver.compare(framework_version, version) < 0
    fragment := {
        # Base fields
        "issuer": raw_fragment.issuer,
        "feed": raw_fragment.feed,
        "minimum_svn": raw_fragment.minimum_svn,
        "includes": raw_fragment.includes,
        # Additional fields need to have default logic applied
    }
}
  1802
# base policy-level flags
allow_properties_access := data.policy.allow_properties_access
allow_dump_stacks := data.policy.allow_dump_stacks
allow_runtime_logging := data.policy.allow_runtime_logging
allow_environment_variable_dropping := data.policy.allow_environment_variable_dropping
allow_unencrypted_scratch := data.policy.allow_unencrypted_scratch

# all flags not in the base set need to have default logic applied

default allow_capability_dropping := false

# capability dropping only exists from framework version 0.2.2 onwards
allow_capability_dropping := flag {
    semver.compare(policy_framework_version, "0.2.2") >= 0
    flag := data.policy.allow_capability_dropping
}

default policy_framework_version := null
default policy_api_version := null

policy_framework_version := data.policy.framework_version
policy_api_version := data.policy.api_version

# deprecated
# older policies used the *_svn field names; accept them as aliases
policy_framework_version := data.policy.framework_svn
policy_api_version := data.policy.api_svn
fragment_framework_version := data[input.namespace].framework_svn

View as plain text