...

Source file src/github.com/cilium/ebpf/btf/core.go

Documentation: github.com/cilium/ebpf/btf

     1  package btf
     2  
     3  import (
     4  	"encoding/binary"
     5  	"errors"
     6  	"fmt"
     7  	"math"
     8  	"reflect"
     9  	"strconv"
    10  	"strings"
    11  
    12  	"github.com/cilium/ebpf/asm"
    13  )
    14  
    15  // Code in this file is derived from libbpf, which is available under a BSD
    16  // 2-Clause license.
    17  
// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	// kind is the relocation kind this fixup was computed for.
	kind coreKind
	// local is the value computed against the local (compile-time) BTF.
	// Used to validate that the instruction still contains what the
	// compiler emitted.
	local uint32
	// target is the value computed against the target (e.g. kernel) BTF,
	// which gets patched into the instruction.
	target uint32
	// True if there is no valid fixup. The instruction is replaced with an
	// invalid dummy.
	poison bool
	// True if the validation of the local value should be skipped. Used by
	// some kinds of bitfield relocations.
	skipLocalValidation bool
}
    30  
    31  func (f *COREFixup) equal(other COREFixup) bool {
    32  	return f.local == other.local && f.target == other.target
    33  }
    34  
    35  func (f *COREFixup) String() string {
    36  	if f.poison {
    37  		return fmt.Sprintf("%s=poison", f.kind)
    38  	}
    39  	return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
    40  }
    41  
// Apply patches ins in place according to the fixup.
//
// A poisoned fixup replaces the instruction with a call to an invalid
// builtin, so the program cannot pass this point at load time. Otherwise
// the target value is written into the instruction's offset or immediate,
// depending on the opcode class, after optionally checking that the
// instruction still carries the value computed against the local BTF.
func (f *COREFixup) Apply(ins *asm.Instruction) error {
	if f.poison {
		// Arbitrary marker value; calling an unknown builtin acts as the
		// "invalid dummy" described on COREFixup.poison.
		const badRelo = 0xbad2310

		*ins = asm.BuiltinFunc(badRelo).Call()
		return nil
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		// Loads and stores carry the relocated value in their 16 bit offset.
		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
		}

		if f.target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
		}

		ins.Offset = int16(f.target)

	case asm.LdClass:
		// Only 64 bit immediate loads can be relocated; the value lives in
		// the constant.
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
		}

		ins.Constant = int64(f.target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		// 32 bit ALU shares the immediate handling below with ALU64.
		fallthrough

	case asm.ALU64Class:
		// The relocated value must be an immediate operand, not a register.
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
		}

		// ALU immediates are 32 bit; larger targets can't be encoded.
		if f.target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
		}

		ins.Constant = int64(f.target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}
   101  
   102  func (f COREFixup) isNonExistant() bool {
   103  	return f.kind.checksForExistence() && f.target == 0
   104  }
   105  
// coreKind is the type of CO-RE relocation as specified in BPF source code.
//
// NOTE(review): the numeric values appear to mirror libbpf's
// enum bpf_core_relo_kind and to come from compiled BPF objects, so the
// order below must not change — confirm against libbpf before touching it.
type coreKind uint32

const (
	reloFieldByteOffset coreKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
)
   123  
   124  func (k coreKind) checksForExistence() bool {
   125  	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
   126  }
   127  
   128  func (k coreKind) String() string {
   129  	switch k {
   130  	case reloFieldByteOffset:
   131  		return "byte_off"
   132  	case reloFieldByteSize:
   133  		return "byte_sz"
   134  	case reloFieldExists:
   135  		return "field_exists"
   136  	case reloFieldSigned:
   137  		return "signed"
   138  	case reloFieldLShiftU64:
   139  		return "lshift_u64"
   140  	case reloFieldRShiftU64:
   141  		return "rshift_u64"
   142  	case reloTypeIDLocal:
   143  		return "local_type_id"
   144  	case reloTypeIDTarget:
   145  		return "target_type_id"
   146  	case reloTypeExists:
   147  		return "type_exists"
   148  	case reloTypeSize:
   149  		return "type_size"
   150  	case reloEnumvalExists:
   151  		return "enumval_exists"
   152  	case reloEnumvalValue:
   153  		return "enumval_value"
   154  	default:
   155  		return "unknown"
   156  	}
   157  }
   158  
// CORERelocate calculates the difference in types between local and target.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
	// Byte order affects bitfield shift computation, so mixing is unsafe.
	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	type reloGroup struct {
		relos []*CORERelocation
		// Position of each relocation in relos.
		indices []int
	}

	// Split relocations into per Type lists.
	relosByType := make(map[Type]*reloGroup)
	result := make([]COREFixup, len(relos))
	for i, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			//
			// NB: accessor is non-empty by construction (see
			// parseCOREAccessor), so indexing [0] is safe.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			id, err := local.TypeID(relo.typ)
			if err != nil {
				return nil, fmt.Errorf("%s: %w", relo.kind, err)
			}

			// Local and target are the same on purpose: the relocation only
			// refers to the local BTF.
			result[i] = COREFixup{
				kind:   relo.kind,
				local:  uint32(id),
				target: uint32(id),
			}
			continue
		}

		group, ok := relosByType[relo.typ]
		if !ok {
			group = &reloGroup{}
			relosByType[relo.typ] = group
		}
		group.relos = append(group.relos, relo)
		group.indices = append(group.indices, i)
	}

	// Solve each group against all target candidates sharing the local
	// type's essential name, then scatter the solutions back into result
	// using the remembered indices.
	for localType, group := range relosByType {
		localTypeName := localType.TypeName()
		if localTypeName == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		targets := target.namedTypes[newEssentialName(localTypeName)]
		fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		for j, index := range group.indices {
			result[index] = fixups[j]
		}
	}

	return result, nil
}
   229  
// errAmbiguousRelocation is returned when multiple candidate target types
// produce conflicting fixups for the same relocation.
var errAmbiguousRelocation = errors.New("ambiguous relocation")

// errImpossibleRelocation signals that local and target types cannot be
// reconciled; callers turn it into a poisoned fixup rather than an error.
var errImpossibleRelocation = errors.New("impossible relocation")
   232  
// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
	localID, err := localSpec.TypeID(local)
	if err != nil {
		return nil, fmt.Errorf("local type ID: %w", err)
	}
	// Strip qualifiers and typedefs so comparisons see the real types.
	local = Copy(local, UnderlyingType)

	// Worst possible score: every relocation poisoned. A target matching
	// this score is treated like no target at all (bestFixups stays nil).
	bestScore := len(relos)
	var bestFixups []COREFixup
	for i := range targets {
		targetID, err := targetSpec.TypeID(targets[i])
		if err != nil {
			return nil, fmt.Errorf("target type ID: %w", err)
		}
		target := Copy(targets[i], UnderlyingType)

		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
			if err != nil {
				return nil, fmt.Errorf("target %s: %w", target, err)
			}
			if fixup.poison || fixup.isNonExistant() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		// (Ranging over a nil bestFixups is a no-op, so the first target
		// scoring len(relos) is silently dropped.)
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all.
		//
		// Poison everything except checksForExistence.
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			if relo.kind.checksForExistence() {
				// Existence probes resolve to "absent" instead of poison.
				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
			} else {
				bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
			}
		}
	}

	return bestFixups, nil
}
   305  
   306  // coreCalculateFixup calculates the fixup for a single local type, target type
   307  // and relocation.
   308  func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
   309  	fixup := func(local, target uint32) (COREFixup, error) {
   310  		return COREFixup{kind: relo.kind, local: local, target: target}, nil
   311  	}
   312  	fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
   313  		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
   314  	}
   315  	poison := func() (COREFixup, error) {
   316  		if relo.kind.checksForExistence() {
   317  			return fixup(1, 0)
   318  		}
   319  		return COREFixup{kind: relo.kind, poison: true}, nil
   320  	}
   321  	zero := COREFixup{}
   322  
   323  	switch relo.kind {
   324  	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
   325  		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
   326  			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
   327  		}
   328  
   329  		err := coreAreTypesCompatible(local, target)
   330  		if errors.Is(err, errImpossibleRelocation) {
   331  			return poison()
   332  		}
   333  		if err != nil {
   334  			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
   335  		}
   336  
   337  		switch relo.kind {
   338  		case reloTypeExists:
   339  			return fixup(1, 1)
   340  
   341  		case reloTypeIDTarget:
   342  			return fixup(uint32(localID), uint32(targetID))
   343  
   344  		case reloTypeSize:
   345  			localSize, err := Sizeof(local)
   346  			if err != nil {
   347  				return zero, err
   348  			}
   349  
   350  			targetSize, err := Sizeof(target)
   351  			if err != nil {
   352  				return zero, err
   353  			}
   354  
   355  			return fixup(uint32(localSize), uint32(targetSize))
   356  		}
   357  
   358  	case reloEnumvalValue, reloEnumvalExists:
   359  		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
   360  		if errors.Is(err, errImpossibleRelocation) {
   361  			return poison()
   362  		}
   363  		if err != nil {
   364  			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
   365  		}
   366  
   367  		switch relo.kind {
   368  		case reloEnumvalExists:
   369  			return fixup(1, 1)
   370  
   371  		case reloEnumvalValue:
   372  			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
   373  		}
   374  
   375  	case reloFieldSigned:
   376  		switch local.(type) {
   377  		case *Enum:
   378  			return fixup(1, 1)
   379  		case *Int:
   380  			return fixup(
   381  				uint32(local.(*Int).Encoding&Signed),
   382  				uint32(target.(*Int).Encoding&Signed),
   383  			)
   384  		default:
   385  			return fixupWithoutValidation(0, 0)
   386  		}
   387  
   388  	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
   389  		if _, ok := target.(*Fwd); ok {
   390  			// We can't relocate fields using a forward declaration, so
   391  			// skip it. If a non-forward declaration is present in the BTF
   392  			// we'll find it in one of the other iterations.
   393  			return poison()
   394  		}
   395  
   396  		localField, targetField, err := coreFindField(local, relo.accessor, target)
   397  		if errors.Is(err, errImpossibleRelocation) {
   398  			return poison()
   399  		}
   400  		if err != nil {
   401  			return zero, fmt.Errorf("target %s: %w", target, err)
   402  		}
   403  
   404  		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
   405  			f.skipLocalValidation = localField.bitfieldSize > 0
   406  			return f, err
   407  		}
   408  
   409  		switch relo.kind {
   410  		case reloFieldExists:
   411  			return fixup(1, 1)
   412  
   413  		case reloFieldByteOffset:
   414  			return maybeSkipValidation(fixup(localField.offset, targetField.offset))
   415  
   416  		case reloFieldByteSize:
   417  			localSize, err := Sizeof(localField.Type)
   418  			if err != nil {
   419  				return zero, err
   420  			}
   421  
   422  			targetSize, err := Sizeof(targetField.Type)
   423  			if err != nil {
   424  				return zero, err
   425  			}
   426  			return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
   427  
   428  		case reloFieldLShiftU64:
   429  			var target uint32
   430  			if byteOrder == binary.LittleEndian {
   431  				targetSize, err := targetField.sizeBits()
   432  				if err != nil {
   433  					return zero, err
   434  				}
   435  
   436  				target = uint32(64 - targetField.bitfieldOffset - targetSize)
   437  			} else {
   438  				loadWidth, err := Sizeof(targetField.Type)
   439  				if err != nil {
   440  					return zero, err
   441  				}
   442  
   443  				target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
   444  			}
   445  			return fixupWithoutValidation(0, target)
   446  
   447  		case reloFieldRShiftU64:
   448  			targetSize, err := targetField.sizeBits()
   449  			if err != nil {
   450  				return zero, err
   451  			}
   452  
   453  			return fixupWithoutValidation(0, uint32(64-targetSize))
   454  		}
   455  	}
   456  
   457  	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
   458  }
   459  
   460  /* coreAccessor contains a path through a struct. It contains at least one index.
   461   *
   462   * The interpretation depends on the kind of the relocation. The following is
   463   * taken from struct bpf_core_relo in libbpf_internal.h:
   464   *
   465   * - for field-based relocations, string encodes an accessed field using
   466   *   a sequence of field and array indices, separated by colon (:). It's
   467   *   conceptually very close to LLVM's getelementptr ([0]) instruction's
   468   *   arguments for identifying offset to a field.
   469   * - for type-based relocations, strings is expected to be just "0";
   470   * - for enum value-based relocations, string contains an index of enum
   471   *   value within its enum type;
   472   *
   473   * Example to provide a better feel.
   474   *
   475   *   struct sample {
   476   *       int a;
   477   *       struct {
   478   *           int b[10];
   479   *       };
   480   *   };
   481   *
   482   *   struct sample s = ...;
   483   *   int x = &s->a;     // encoded as "0:0" (a is field #0)
   484   *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
   485   *                      // b is field #0 inside anon struct, accessing elem #5)
   486   *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
   487   */
   488  type coreAccessor []int
   489  
   490  func parseCOREAccessor(accessor string) (coreAccessor, error) {
   491  	if accessor == "" {
   492  		return nil, fmt.Errorf("empty accessor")
   493  	}
   494  
   495  	parts := strings.Split(accessor, ":")
   496  	result := make(coreAccessor, 0, len(parts))
   497  	for _, part := range parts {
   498  		// 31 bits to avoid overflowing int on 32 bit platforms.
   499  		index, err := strconv.ParseUint(part, 10, 31)
   500  		if err != nil {
   501  			return nil, fmt.Errorf("accessor index %q: %s", part, err)
   502  		}
   503  
   504  		result = append(result, int(index))
   505  	}
   506  
   507  	return result, nil
   508  }
   509  
   510  func (ca coreAccessor) String() string {
   511  	strs := make([]string, 0, len(ca))
   512  	for _, i := range ca {
   513  		strs = append(strs, strconv.Itoa(i))
   514  	}
   515  	return strings.Join(strs, ":")
   516  }
   517  
   518  func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
   519  	e, ok := t.(*Enum)
   520  	if !ok {
   521  		return nil, fmt.Errorf("not an enum: %s", t)
   522  	}
   523  
   524  	if len(ca) > 1 {
   525  		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
   526  	}
   527  
   528  	i := ca[0]
   529  	if i >= len(e.Values) {
   530  		return nil, fmt.Errorf("invalid index %d for %s", i, e)
   531  	}
   532  
   533  	return &e.Values[i], nil
   534  }
   535  
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
//     /- start of composite
//     | offset * 8 | bitfieldOffset | bitfieldSize | ... |
//                  \- start of field       end of field -/
//
// For regular (non-bitfield) fields only Type and offset are meaningful.
type coreField struct {
	Type Type

	// The position of the field from the start of the composite type in bytes.
	offset uint32

	// The offset of the bitfield in bits from the start of the field.
	bitfieldOffset Bits

	// The size of the bitfield in bits.
	//
	// Zero if the field is not a bitfield.
	bitfieldSize Bits
}
   556  
   557  func (cf *coreField) adjustOffsetToNthElement(n int) error {
   558  	size, err := Sizeof(cf.Type)
   559  	if err != nil {
   560  		return err
   561  	}
   562  
   563  	cf.offset += uint32(n) * uint32(size)
   564  	return nil
   565  }
   566  
   567  func (cf *coreField) adjustOffsetBits(offset Bits) error {
   568  	align, err := alignof(cf.Type)
   569  	if err != nil {
   570  		return err
   571  	}
   572  
   573  	// We can compute the load offset by:
   574  	// 1) converting the bit offset to bytes with a flooring division.
   575  	// 2) dividing and multiplying that offset by the alignment, yielding the
   576  	//    load size aligned offset.
   577  	offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
   578  
   579  	// The number of bits remaining is the bit offset less the number of bits
   580  	// we can "skip" with the aligned offset.
   581  	cf.bitfieldOffset = offset - Bits(offsetBytes*8)
   582  
   583  	// We know that cf.offset is aligned at to at least align since we get it
   584  	// from the compiler via BTF. Adding an aligned offsetBytes preserves the
   585  	// alignment.
   586  	cf.offset += offsetBytes
   587  	return nil
   588  }
   589  
   590  func (cf *coreField) sizeBits() (Bits, error) {
   591  	if cf.bitfieldSize > 0 {
   592  		return cf.bitfieldSize, nil
   593  	}
   594  
   595  	// Someone is trying to access a non-bitfield via a bit shift relocation.
   596  	// This happens when a field changes from a bitfield to a regular field
   597  	// between kernel versions. Synthesise the size to make the shifts work.
   598  	size, err := Sizeof(cf.Type)
   599  	if err != nil {
   600  		return 0, nil
   601  	}
   602  	return Bits(size * 8), nil
   603  }
   604  
// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
//
// Assumes localAcc is non-empty (guaranteed by parseCOREAccessor).
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
	local := coreField{Type: localT}
	target := coreField{Type: targetT}

	// The first index is used to offset a pointer of the base type like
	// when accessing an array.
	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	// Flexible (zero-length) arrays are only valid as the last member, so
	// track whether the previous step landed on a last member.
	var localMaybeFlex, targetMaybeFlex bool
	for i, acc := range localAcc[1:] {
		switch localType := local.Type.(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				_, ok := localMember.Type.(composite)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				// Only local descends here: the target-side lookup through
				// anonymous members happens in coreFindMember.
				local = coreField{
					Type:   localMember.Type,
					offset: local.offset + localMember.Offset.Bytes(),
				}
				localMaybeFlex = false
				continue
			}

			targetType, ok := target.Type.(composite)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			local = coreField{
				Type:         localMember.Type,
				offset:       local.offset,
				bitfieldSize: localMember.BitfieldSize,
			}
			localMaybeFlex = acc == len(localMembers)-1

			target = coreField{
				Type:         targetMember.Type,
				offset:       target.offset,
				bitfieldSize: targetMember.BitfieldSize,
			}
			targetMaybeFlex = last

			if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
				local.offset += localMember.Offset.Bytes()
				target.offset += targetMember.Offset.Bytes()
				// NB: break leaves the switch, not the loop; the shared
				// compatibility check below still runs.
				break
			}

			// Either of the members is a bitfield. Make sure we're at the
			// end of the accessor.
			if next := i + 1; next < len(localAcc[1:]) {
				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
			}

			if err := local.adjustOffsetBits(localMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

			if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := target.Type.(*Array)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			// Bounds checks only apply to fixed-size arrays; flexible
			// arrays (Nelems == 0) accept any index.
			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = coreField{
				Type:   localType.Type,
				offset: local.offset,
			}
			localMaybeFlex = false

			if err := local.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

			target = coreField{
				Type:   targetType.Type,
				offset: target.offset,
			}
			targetMaybeFlex = false

			if err := target.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return local, target, nil
}
   751  
// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
//
// Performs a breadth-first search through anonymous members. The second
// return value reports whether the found member is the last member of the
// composite it was found in (relevant for flexible array handling).
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	// offsetTarget is a composite to search plus the cumulative bit offset
	// of that composite from the root type.
	type offsetTarget struct {
		composite
		offset Bits
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	// targets acts as a FIFO queue; anonymous members are appended as they
	// are discovered.
	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target] {
			continue
		}
		if len(visited) >= maxTypeDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := member.Type.(composite)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}
   806  
   807  // coreFindEnumValue follows localAcc to find the equivalent enum value in target.
   808  func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
   809  	localValue, err := localAcc.enumValue(local)
   810  	if err != nil {
   811  		return nil, nil, err
   812  	}
   813  
   814  	targetEnum, ok := target.(*Enum)
   815  	if !ok {
   816  		return nil, nil, errImpossibleRelocation
   817  	}
   818  
   819  	localName := newEssentialName(localValue.Name)
   820  	for i, targetValue := range targetEnum.Values {
   821  		if newEssentialName(targetValue.Name) != localName {
   822  			continue
   823  		}
   824  
   825  		return localValue, &targetEnum.Values[i], nil
   826  	}
   827  
   828  	return nil, nil, errImpossibleRelocation
   829  }
   830  
   831  /* The comment below is from bpf_core_types_are_compat in libbpf.c:
   832   *
   833   * Check local and target types for compatibility. This check is used for
   834   * type-based CO-RE relocations and follow slightly different rules than
   835   * field-based relocations. This function assumes that root types were already
   836   * checked for name match. Beyond that initial root-level name check, names
   837   * are completely ignored. Compatibility rules are as follows:
   838   *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
   839   *     kind should match for local and target types (i.e., STRUCT is not
   840   *     compatible with UNION);
   841   *   - for ENUMs, the size is ignored;
   842   *   - for INT, size and signedness are ignored;
   843   *   - for ARRAY, dimensionality is ignored, element types are checked for
   844   *     compatibility recursively;
   845   *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
   846   *   - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
   847   *   - FUNC_PROTOs are compatible if they have compatible signature: same
   848   *     number of input args and compatible return and argument types.
   849   * These rules are not set in stone and probably will be adjusted as we get
   850   * more experience with using BPF CO-RE relocations.
   851   *
   852   * Returns errImpossibleRelocation if types are not compatible.
   853   */
// coreAreTypesCompatible implements the compatibility check described in the
// comment above, walking both type graphs in lock step.
//
// Returns errImpossibleRelocation if the types are not compatible.
func coreAreTypesCompatible(localType Type, targetType Type) error {
	var (
		// Work queues of child types still to compare; both are pushed to
		// in lock step via walk below, so they stay the same length.
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return errors.New("types are nested too deep")
		}

		localType = *l
		targetType = *t

		// Kinds must match exactly (e.g. Struct vs Union is incompatible).
		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
			// Nothing to do here: sizes, signedness and members are
			// intentionally ignored per the rules above.

		case *Pointer, *Array:
			// Compare pointee / element types on the next iteration.
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
			}

			// Return and parameter types are enqueued for comparison.
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return fmt.Errorf("unsupported type %T", localType)
		}
	}

	// Both queues should drain together; a leftover on either side means
	// the walk got out of sync.
	if l != nil {
		return fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return fmt.Errorf("dangling target type %T", *t)
	}

	return nil
}
   907  
   908  /* coreAreMembersCompatible checks two types for field-based relocation compatibility.
   909   *
   910   * The comment below is from bpf_core_fields_are_compat in libbpf.c:
   911   *
   912   * Check two types for compatibility for the purpose of field access
   913   * relocation. const/volatile/restrict and typedefs are skipped to ensure we
   914   * are relocating semantically compatible entities:
   915   *   - any two STRUCTs/UNIONs are compatible and can be mixed;
   916   *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
   917   *   - any two PTRs are always compatible;
   918   *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
   919   *     least one of enums should be anonymous;
   920   *   - for ENUMs, check sizes, names are ignored;
   921   *   - for INT, size and signedness are ignored;
   922   *   - any two FLOATs are always compatible;
   923   *   - for ARRAY, dimensionality is ignored, element types are checked for
   924   *     compatibility recursively;
   925   *     [ NB: coreAreMembersCompatible doesn't recurse, this check is done
   926   *       by coreFindField. ]
   927   *   - everything else shouldn't be ever a target of relocation.
   928   * These rules are not set in stone and probably will be adjusted as we get
   929   * more experience with using BPF CO-RE relocations.
   930   *
   931   * Returns errImpossibleRelocation if the members are not compatible.
   932   */
   933  func coreAreMembersCompatible(localType Type, targetType Type) error {
   934  	doNamesMatch := func(a, b string) error {
   935  		if a == "" || b == "" {
   936  			// allow anonymous and named type to match
   937  			return nil
   938  		}
   939  
   940  		if newEssentialName(a) == newEssentialName(b) {
   941  			return nil
   942  		}
   943  
   944  		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
   945  	}
   946  
   947  	_, lok := localType.(composite)
   948  	_, tok := targetType.(composite)
   949  	if lok && tok {
   950  		return nil
   951  	}
   952  
   953  	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
   954  		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
   955  	}
   956  
   957  	switch lv := localType.(type) {
   958  	case *Array, *Pointer, *Float, *Int:
   959  		return nil
   960  
   961  	case *Enum:
   962  		tv := targetType.(*Enum)
   963  		return doNamesMatch(lv.Name, tv.Name)
   964  
   965  	case *Fwd:
   966  		tv := targetType.(*Fwd)
   967  		return doNamesMatch(lv.Name, tv.Name)
   968  
   969  	default:
   970  		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
   971  	}
   972  }
   973  

View as plain text