...

Source file src/github.com/cilium/ebpf/linker.go

Documentation: github.com/cilium/ebpf

     1  package ebpf
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"sync"
     7  
     8  	"github.com/cilium/ebpf/asm"
     9  	"github.com/cilium/ebpf/btf"
    10  )
    11  
    12  // splitSymbols splits insns into subsections delimited by Symbol Instructions.
    13  // insns cannot be empty and must start with a Symbol Instruction.
    14  //
    15  // The resulting map is indexed by Symbol name.
    16  func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) {
    17  	if len(insns) == 0 {
    18  		return nil, errors.New("insns is empty")
    19  	}
    20  
    21  	if insns[0].Symbol() == "" {
    22  		return nil, errors.New("insns must start with a Symbol")
    23  	}
    24  
    25  	var name string
    26  	progs := make(map[string]asm.Instructions)
    27  	for _, ins := range insns {
    28  		if sym := ins.Symbol(); sym != "" {
    29  			if progs[sym] != nil {
    30  				return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym)
    31  			}
    32  			name = sym
    33  		}
    34  
    35  		progs[name] = append(progs[name], ins)
    36  	}
    37  
    38  	return progs, nil
    39  }
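
As a minimal sketch (not part of the upstream file), splitSymbols could be exercised from within the package like this; the function names "main" and "helper" and the helper sketchSplitSymbols are made up for illustration:

	// sketchSplitSymbols is an illustration only. It builds a stream
	// containing two functions and splits it into per-symbol subsections.
	func sketchSplitSymbols() (map[string]asm.Instructions, error) {
		insns := asm.Instructions{
			asm.Call.Label("helper").WithSymbol("main"),
			asm.Return(),
			asm.Mov.Imm(asm.R0, 0).WithSymbol("helper"),
			asm.Return(),
		}

		// The result maps "main" to the first two instructions and
		// "helper" to the remaining two.
		return splitSymbols(insns)
	}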
    40  
    41  // The linker is responsible for resolving bpf-to-bpf calls between programs
    42  // within an ELF. Each BPF program must be a self-contained binary blob,
    43  // so when an instruction in one ELF program section wants to jump to
    44  // a function in another, the linker needs to pull in the bytecode
    45  // (and BTF info) of the target function and concatenate the instruction
    46  // streams.
    47  //
    48  // Later on in the pipeline, all call sites are fixed up with relative jumps
     49  // within this newly-created instruction stream, which is then handed off to
     50  // the kernel with BPF_PROG_LOAD.
    51  //
    52  // Each function is denoted by an ELF symbol and the compiler takes care of
    53  // register setup before each jump instruction.
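
For intuition, a sketch of what a call site looks like before linking; the callee name "do_work" and the variable sketchCallSite are invented for the example:

	// sketchCallSite is an illustration only: before linking, a bpf-to-bpf
	// call carries the callee's name as a symbolic reference, and the
	// callee's bytecode is not yet part of the caller's instruction stream.
	var sketchCallSite = asm.Call.Label("do_work")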
    54  
    55  // hasFunctionReferences returns true if insns contains one or more bpf2bpf
    56  // function references.
    57  func hasFunctionReferences(insns asm.Instructions) bool {
    58  	for _, i := range insns {
    59  		if i.IsFunctionReference() {
    60  			return true
    61  		}
    62  	}
    63  	return false
    64  }
    65  
    66  // applyRelocations collects and applies any CO-RE relocations in insns.
    67  //
    68  // Passing a nil target will relocate against the running kernel. insns are
    69  // modified in place.
    70  func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error {
    71  	var relos []*btf.CORERelocation
    72  	var reloInsns []*asm.Instruction
    73  	iter := insns.Iterate()
    74  	for iter.Next() {
    75  		if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil {
    76  			relos = append(relos, relo)
    77  			reloInsns = append(reloInsns, iter.Ins)
    78  		}
    79  	}
    80  
    81  	if len(relos) == 0 {
    82  		return nil
    83  	}
    84  
    85  	target, err := maybeLoadKernelBTF(target)
    86  	if err != nil {
    87  		return err
    88  	}
    89  
    90  	fixups, err := btf.CORERelocate(local, target, relos)
    91  	if err != nil {
    92  		return err
    93  	}
    94  
    95  	for i, fixup := range fixups {
    96  		if err := fixup.Apply(reloInsns[i]); err != nil {
    97  			return fmt.Errorf("apply fixup %s: %w", &fixup, err)
    98  		}
    99  	}
   100  
   101  	return nil
   102  }
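
A usage sketch (the helper name and parameters are assumptions, not part of the upstream file): relocating a program's instructions against the running kernel by passing a nil target:

	// sketchApplyRelocations is an illustration only. local is the BTF that
	// was compiled into the ELF; passing a nil target makes applyRelocations
	// load and relocate against the running kernel's BTF. insns are modified
	// in place.
	func sketchApplyRelocations(insns asm.Instructions, local *btf.Spec) error {
		return applyRelocations(insns, local, nil)
	}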
   103  
   104  // flattenPrograms resolves bpf-to-bpf calls for a set of programs.
   105  //
   106  // Links all programs in names by modifying their ProgramSpec in progs.
   107  func flattenPrograms(progs map[string]*ProgramSpec, names []string) {
   108  	// Pre-calculate all function references.
   109  	refs := make(map[*ProgramSpec][]string)
   110  	for _, prog := range progs {
   111  		refs[prog] = prog.Instructions.FunctionReferences()
   112  	}
   113  
   114  	// Create a flattened instruction stream, but don't modify progs yet to
   115  	// avoid linking multiple times.
   116  	flattened := make([]asm.Instructions, 0, len(names))
   117  	for _, name := range names {
   118  		flattened = append(flattened, flattenInstructions(name, progs, refs))
   119  	}
   120  
   121  	// Finally, assign the flattened instructions.
   122  	for i, name := range names {
   123  		progs[name].Instructions = flattened[i]
   124  	}
   125  }
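
To make the flattening step concrete, a small sketch (the program names "entry" and "helper" are made up) showing that only the programs listed in names have their dependencies appended:

	// sketchFlattenPrograms is an illustration only. After the call, "entry"
	// contains its own instructions followed by a copy of "helper"'s, while
	// "helper" itself is left untouched.
	func sketchFlattenPrograms() asm.Instructions {
		progs := map[string]*ProgramSpec{
			"entry": {Instructions: asm.Instructions{
				asm.Call.Label("helper").WithSymbol("entry"),
				asm.Return(),
			}},
			"helper": {Instructions: asm.Instructions{
				asm.Mov.Imm(asm.R0, 0).WithSymbol("helper"),
				asm.Return(),
			}},
		}

		flattenPrograms(progs, []string{"entry"})
		return progs["entry"].Instructions
	}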
   126  
   127  // flattenInstructions resolves bpf-to-bpf calls for a single program.
   128  //
   129  // Flattens the instructions of prog by concatenating the instructions of all
   130  // direct and indirect dependencies.
   131  //
    132  // progs contains all referenceable programs, while refs contains the direct
   133  // dependencies of each program.
   134  func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions {
   135  	prog := progs[name]
   136  
   137  	insns := make(asm.Instructions, len(prog.Instructions))
   138  	copy(insns, prog.Instructions)
   139  
    140  	// Add all direct references of prog to the list of programs to be linked.
   141  	pending := make([]string, len(refs[prog]))
   142  	copy(pending, refs[prog])
   143  
   144  	// All references for which we've appended instructions.
   145  	linked := make(map[string]bool)
   146  
   147  	// Iterate all pending references. We can't use a range since pending is
   148  	// modified in the body below.
   149  	for len(pending) > 0 {
   150  		var ref string
   151  		ref, pending = pending[0], pending[1:]
   152  
   153  		if linked[ref] {
   154  			// We've already linked this ref, don't append instructions again.
   155  			continue
   156  		}
   157  
   158  		progRef := progs[ref]
   159  		if progRef == nil {
   160  			// We don't have instructions that go with this reference. This
   161  			// happens when calling extern functions.
   162  			continue
   163  		}
   164  
   165  		insns = append(insns, progRef.Instructions...)
   166  		linked[ref] = true
   167  
   168  		// Make sure we link indirect references.
   169  		pending = append(pending, refs[progRef]...)
   170  	}
   171  
   172  	return insns
   173  }
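
A sketch of the deduplication behaviour (all names invented): in a diamond-shaped dependency graph where both a and b call c, c's instructions are appended exactly once thanks to the linked map:

	// sketchFlattenInstructions is an illustration only.
	func sketchFlattenInstructions() asm.Instructions {
		mk := func(sym string, calls ...string) *ProgramSpec {
			insns := asm.Instructions{asm.Mov.Imm(asm.R0, 0).WithSymbol(sym)}
			for _, callee := range calls {
				insns = append(insns, asm.Call.Label(callee))
			}
			return &ProgramSpec{Instructions: append(insns, asm.Return())}
		}

		progs := map[string]*ProgramSpec{
			"entry": mk("entry", "a", "b"),
			"a":     mk("a", "c"),
			"b":     mk("b", "c"),
			"c":     mk("c"),
		}

		refs := make(map[*ProgramSpec][]string)
		for _, p := range progs {
			refs[p] = p.Instructions.FunctionReferences()
		}

		// The result is entry's instructions followed by those of a, b and
		// c, with c included only once.
		return flattenInstructions("entry", progs, refs)
	}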
   174  
   175  // fixupAndValidate is called by the ELF reader right before marshaling the
   176  // instruction stream. It performs last-minute adjustments to the program and
   177  // runs some sanity checks before sending it off to the kernel.
   178  func fixupAndValidate(insns asm.Instructions) error {
   179  	iter := insns.Iterate()
   180  	for iter.Next() {
   181  		ins := iter.Ins
   182  
   183  		// Map load was tagged with a Reference, but does not contain a Map pointer.
   184  		if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil {
   185  			return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference)
   186  		}
   187  
   188  		fixupProbeReadKernel(ins)
   189  	}
   190  
   191  	return nil
   192  }
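
A sketch of the sanity check (the map name "events" is made up): a map load that still carries only a symbolic reference, without an associated Map, is rejected:

	// sketchFixupAndValidate is an illustration only. The load below was
	// never associated with a *Map, so fixupAndValidate returns an error
	// wrapping asm.ErrUnsatisfiedMapReference.
	func sketchFixupAndValidate() error {
		insns := asm.Instructions{
			asm.LoadMapPtr(asm.R1, 0).WithReference("events"),
			asm.Return(),
		}
		return fixupAndValidate(insns)
	}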
   193  
   194  // fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
   195  // with bpf_probe_read(_str) on kernels that don't support it yet.
   196  func fixupProbeReadKernel(ins *asm.Instruction) {
   197  	if !ins.IsBuiltinCall() {
   198  		return
   199  	}
   200  
   201  	// Kernel supports bpf_probe_read_kernel, nothing to do.
   202  	if haveProbeReadKernel() == nil {
   203  		return
   204  	}
   205  
   206  	switch asm.BuiltinFunc(ins.Constant) {
   207  	case asm.FnProbeReadKernel, asm.FnProbeReadUser:
   208  		ins.Constant = int64(asm.FnProbeRead)
   209  	case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
   210  		ins.Constant = int64(asm.FnProbeReadStr)
   211  	}
   212  }
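
A sketch of the rewrite (the helper name sketchFixupProbeReadKernel is invented): on kernels without bpf_probe_read_kernel, the call's constant is rewritten in place to the older helper:

	// sketchFixupProbeReadKernel is an illustration only.
	func sketchFixupProbeReadKernel() asm.Instruction {
		ins := asm.FnProbeReadKernel.Call()
		fixupProbeReadKernel(&ins)
		// On kernels lacking bpf_probe_read_kernel, ins.Constant now holds
		// asm.FnProbeRead; otherwise the instruction is unchanged.
		return ins
	}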
   213  
   214  var kernelBTF struct {
   215  	sync.Mutex
   216  	spec *btf.Spec
   217  }
   218  
   219  // maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise
   220  // it returns spec unchanged.
   221  //
   222  // The kernel BTF is cached for the lifetime of the process.
   223  func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) {
   224  	if spec != nil {
   225  		return spec, nil
   226  	}
   227  
   228  	kernelBTF.Lock()
   229  	defer kernelBTF.Unlock()
   230  
   231  	if kernelBTF.spec != nil {
   232  		return kernelBTF.spec, nil
   233  	}
   234  
   235  	var err error
   236  	kernelBTF.spec, err = btf.LoadKernelSpec()
   237  	return kernelBTF.spec, err
   238  }
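
A short usage sketch (the helper name is invented): passing nil loads and caches the running kernel's BTF, while a non-nil spec is returned as-is:

	// sketchMaybeLoadKernelBTF is an illustration only. The first call with
	// nil loads the kernel's BTF and caches it in kernelBTF; later calls
	// return the cached spec.
	func sketchMaybeLoadKernelBTF() (*btf.Spec, error) {
		return maybeLoadKernelBTF(nil)
	}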
   239  
