...

Source file src/go.etcd.io/etcd/raft/v3/node.go

Documentation: go.etcd.io/etcd/raft/v3

     1  // Copyright 2015 The etcd Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package raft
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  
    21  	pb "go.etcd.io/etcd/raft/v3/raftpb"
    22  )
    23  
// SnapshotStatus describes the outcome of sending a snapshot to a follower;
// the application reports it back to raft via Node.ReportSnapshot.
type SnapshotStatus int

const (
	// SnapshotFinish indicates the snapshot was received by the follower.
	SnapshotFinish SnapshotStatus = 1
	// SnapshotFailure indicates sending or applying the snapshot failed.
	SnapshotFailure SnapshotStatus = 2
)

var (
	// emptyState is the zero HardState, used to detect "no update" in a Ready.
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)
    37  
// SoftState provides state that is useful for logging and debugging.
// The state is volatile and does not need to be persisted to the WAL.
type SoftState struct {
	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
	RaftState StateType
}

// equal reports whether a and b agree on leader and node role.
// Both a and b must be non-nil.
func (a *SoftState) equal(b *SoftState) bool {
	return a.Lead == b.Lead && a.RaftState == b.RaftState
}
    48  
// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	// HardState will be equal to empty state if there is no update.
	pb.HardState

	// ReadStates can be used for node to serve linearizable read requests locally
	// when its applied index is greater than the index in ReadState.
	// Note that the readState will be returned when raft receives msgReadIndex.
	// The returned state is valid only for the request that requested to read.
	ReadStates []ReadState

	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been committed to stable
	// store.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages to be sent AFTER Entries are
	// committed to stable storage.
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message

	// MustSync indicates whether the HardState and Entries must be synchronously
	// written to disk or if an asynchronous write is permissible.
	MustSync bool
}
    91  
    92  func isHardStateEqual(a, b pb.HardState) bool {
    93  	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
    94  }
    95  
// IsEmptyHardState returns true if the given HardState is empty.
// "Empty" means equal to the zero value of pb.HardState.
func IsEmptyHardState(st pb.HardState) bool {
	return isHardStateEqual(st, emptyState)
}
   100  
// IsEmptySnap returns true if the given Snapshot is empty.
// A snapshot at index 0 can never exist (log indexes start at 1),
// so a zero metadata index reliably marks the zero-value snapshot.
func IsEmptySnap(sp pb.Snapshot) bool {
	return sp.Metadata.Index == 0
}
   105  
   106  func (rd Ready) containsUpdates() bool {
   107  	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
   108  		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
   109  		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
   110  }
   111  
   112  // appliedCursor extracts from the Ready the highest index the client has
   113  // applied (once the Ready is confirmed via Advance). If no information is
   114  // contained in the Ready, returns zero.
   115  func (rd Ready) appliedCursor() uint64 {
   116  	if n := len(rd.CommittedEntries); n > 0 {
   117  		return rd.CommittedEntries[n-1].Index
   118  	}
   119  	if index := rd.Snapshot.Metadata.Index; index > 0 {
   120  		return index
   121  	}
   122  	return 0
   123  }
   124  
// Node represents a node in a raft cluster.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()
	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log. Note that proposals can be lost without
	// notice, therefore it is user's job to ensure proposal retries.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes a configuration change. Like any proposal, the
	// configuration change may be dropped with or without an error being
	// returned. In particular, configuration changes are dropped unless the
	// leader has certainty that there is no prior unapplied configuration
	// change in its log.
	//
	// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
	// message. The latter allows arbitrary configuration changes via joint
	// consensus, notably including replacing a voter. Passing a ConfChangeV2
	// message is only allowed if all Nodes participating in the cluster run a
	// version of this library aware of the V2 API. See pb.ConfChangeV2 for
	// usage details and semantics.
	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error

	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready.
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready

	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example, when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before finishing applying the last ready.
	Advance()
	// ApplyConfChange applies a config change (previously passed to
	// ProposeConfChange) to the node. This must be called whenever a config
	// change is observed in Ready.CommittedEntries, except when the app decides
	// to reject the configuration change (i.e. treats it as a noop instead), in
	// which case it must not be called.
	//
	// Returns an opaque non-nil ConfState protobuf which must be recorded in
	// snapshots.
	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState

	// TransferLeadership attempts to transfer leadership to the given transferee.
	TransferLeadership(ctx context.Context, lead, transferee uint64)

	// ReadIndex requests a read state. The read state will be set in the ready.
	// Read state has a read index. Once the application advances further than the read
	// index, any linearizable read requests issued before the read request can be
	// processed safely. The read state will have the same rctx attached.
	// Note that request can be lost without notice, therefore it is user's job
	// to ensure read index retries.
	ReadIndex(ctx context.Context, rctx []byte) error

	// Status returns the current status of the raft state machine.
	Status() Status
	// ReportUnreachable reports the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
	// who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
	// Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a
	// snapshot (for e.g., while streaming it from leader to follower), should be reported to the
	// leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft
	// log probes until the follower can apply the snapshot and advance its state. If the follower
	// can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any
	// updates from the leader. Therefore, it is crucial that the application ensures that any
	// failure in snapshot sending is caught and reported back to the leader; so it can resume raft
	// log probing in the follower.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}
   208  
// Peer identifies an initial member of the cluster for StartNode.
type Peer struct {
	ID      uint64 // raft ID of the peer; must be non-zero and unique.
	Context []byte // opaque application data carried in the bootstrap ConfChange.
}
   213  
   214  // StartNode returns a new Node given configuration and a list of raft peers.
   215  // It appends a ConfChangeAddNode entry for each given peer to the initial log.
   216  //
   217  // Peers must not be zero length; call RestartNode in that case.
   218  func StartNode(c *Config, peers []Peer) Node {
   219  	if len(peers) == 0 {
   220  		panic("no peers given; use RestartNode instead")
   221  	}
   222  	rn, err := NewRawNode(c)
   223  	if err != nil {
   224  		panic(err)
   225  	}
   226  	rn.Bootstrap(peers)
   227  
   228  	n := newNode(rn)
   229  
   230  	go n.run()
   231  	return &n
   232  }
   233  
   234  // RestartNode is similar to StartNode but does not take a list of peers.
   235  // The current membership of the cluster will be restored from the Storage.
   236  // If the caller has an existing state machine, pass in the last log index that
   237  // has been applied to it; otherwise use zero.
   238  func RestartNode(c *Config) Node {
   239  	rn, err := NewRawNode(c)
   240  	if err != nil {
   241  		panic(err)
   242  	}
   243  	n := newNode(rn)
   244  	go n.run()
   245  	return &n
   246  }
   247  
// msgWithResult pairs a proposal message with an optional channel on which
// the run goroutine reports the outcome of stepping the message.
type msgWithResult struct {
	m      pb.Message
	result chan error // nil when the proposer does not wait for the result.
}
   252  
// node is the canonical implementation of the Node interface.
// All interaction with the underlying RawNode happens on the run goroutine;
// the channels below are the rendezvous points for the public API methods.
type node struct {
	propc      chan msgWithResult  // proposals (MsgProp); enabled only while a leader is known.
	recvc      chan pb.Message     // all other inbound messages.
	confc      chan pb.ConfChangeV2 // config changes to apply.
	confstatec chan pb.ConfState   // resulting ConfState back to ApplyConfChange.
	readyc     chan Ready          // delivers Ready batches to the application.
	advancec   chan struct{}       // application acknowledgment of the last Ready.
	tickc      chan struct{}       // logical clock ticks (buffered; see newNode).
	done       chan struct{}       // closed when run exits; unblocks all waiters.
	stop       chan struct{}       // signals run to terminate.
	status     chan chan Status    // status requests; reply sent on the inner channel.

	rn *RawNode
}
   268  
   269  func newNode(rn *RawNode) node {
   270  	return node{
   271  		propc:      make(chan msgWithResult),
   272  		recvc:      make(chan pb.Message),
   273  		confc:      make(chan pb.ConfChangeV2),
   274  		confstatec: make(chan pb.ConfState),
   275  		readyc:     make(chan Ready),
   276  		advancec:   make(chan struct{}),
   277  		// make tickc a buffered chan, so raft node can buffer some ticks when the node
   278  		// is busy processing raft messages. Raft node will resume process buffered
   279  		// ticks when it becomes idle.
   280  		tickc:  make(chan struct{}, 128),
   281  		done:   make(chan struct{}),
   282  		stop:   make(chan struct{}),
   283  		status: make(chan chan Status),
   284  		rn:     rn,
   285  	}
   286  }
   287  
// Stop terminates the node's run goroutine and blocks until it has exited.
// It is safe to call Stop multiple times and concurrently with a shutdown
// already in progress.
func (n *node) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// Node has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}
   299  
// run is the node's event loop. It owns the RawNode (and hence the raft
// state machine) exclusively; every public method communicates with it
// through channels. It exits when n.stop is signaled, closing n.done.
//
// The loop relies on the nil-channel idiom: a select case on a nil channel
// never fires, so setting propc/readyc/advancec to nil disables the
// corresponding case for that iteration.
func (n *node) run() {
	var propc chan msgWithResult
	var readyc chan Ready
	var advancec chan struct{}
	var rd Ready

	r := n.rn.raft

	lead := None

	for {
		if advancec != nil {
			// A Ready is outstanding; don't emit another until Advance.
			readyc = nil
		} else if n.rn.HasReady() {
			// Populate a Ready. Note that this Ready is not guaranteed to
			// actually be handled. We will arm readyc, but there's no guarantee
			// that we will actually send on it. It's possible that we will
			// service another channel instead, loop around, and then populate
			// the Ready again. We could instead force the previous Ready to be
			// handled first, but it's generally good to emit larger Readys plus
			// it simplifies testing (by emitting less frequently and more
			// predictably).
			rd = n.rn.readyWithoutAccept()
			readyc = n.readyc
		}

		if lead != r.lead {
			// Leadership changed: proposals are only accepted while a leader
			// is known (they would be dropped otherwise), so enable or
			// disable propc accordingly.
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case pm := <-propc:
			// Proposal: stamp the local ID as sender and report the step
			// outcome to a waiting proposer, if any.
			m := pm.m
			m.From = r.id
			err := r.Step(m)
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc:
			// filter out response message from unknown From.
			if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
				r.Step(m)
			}
		case cc := <-n.confc:
			_, okBefore := r.prs.Progress[r.id]
			cs := r.applyConfChange(cc)
			// If the node was removed, block incoming proposals. Note that we
			// only do this if the node was in the config before. Nodes may be
			// a member of the group without knowing this (when they're catching
			// up on the log and don't have the latest config) and we don't want
			// to block the proposal channel in that case.
			//
			// NB: propc is reset when the leader changes, which, if we learn
			// about it, sort of implies that we got readded, maybe? This isn't
			// very sound and likely has bugs.
			if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
				var found bool
			outer:
				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
					for _, id := range sl {
						if id == r.id {
							found = true
							break outer
						}
					}
				}
				if !found {
					propc = nil
				}
			}
			// Hand the resulting ConfState back to ApplyConfChange; n.done
			// guards against a caller that has already given up.
			select {
			case n.confstatec <- cs:
			case <-n.done:
			}
		case <-n.tickc:
			n.rn.Tick()
		case readyc <- rd:
			// The application took the Ready; don't emit another until it
			// calls Advance.
			n.rn.acceptReady(rd)
			advancec = n.advancec
		case <-advancec:
			n.rn.Advance(rd)
			rd = Ready{}
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}
   406  
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
// The send is non-blocking: if the (buffered) tick channel is full because
// the run goroutine has fallen behind, the tick is dropped with a warning
// rather than stalling the caller's timer loop.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.rn.raft.logger.Warningf("%x A tick missed to fire. Node blocks too long!", n.rn.raft.id)
	}
}
   417  
   418  func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
   419  
   420  func (n *node) Propose(ctx context.Context, data []byte) error {
   421  	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
   422  }
   423  
   424  func (n *node) Step(ctx context.Context, m pb.Message) error {
   425  	// ignore unexpected local messages receiving over network
   426  	if IsLocalMsg(m.Type) {
   427  		// TODO: return an error?
   428  		return nil
   429  	}
   430  	return n.step(ctx, m)
   431  }
   432  
   433  func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
   434  	typ, data, err := pb.MarshalConfChange(c)
   435  	if err != nil {
   436  		return pb.Message{}, err
   437  	}
   438  	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
   439  }
   440  
   441  func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
   442  	msg, err := confChangeToMsg(cc)
   443  	if err != nil {
   444  		return err
   445  	}
   446  	return n.Step(ctx, msg)
   447  }
   448  
// step hands m to the run goroutine without waiting for the step result.
func (n *node) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}

// stepWait hands m to the run goroutine and blocks until raft has stepped
// the message, returning any error raft reported.
func (n *node) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}
   456  
// stepWithWaitOption advances the state machine using m. The ctx.Err() will
// be returned, if any. Non-proposal messages are delivered via recvc and
// never wait for a result; proposals go through propc and, when wait is
// true, block until the run goroutine has stepped the message and reported
// the outcome.
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		// Buffered so run() can always deliver the result without blocking,
		// even if this caller abandons the wait on ctx cancellation.
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	// Wait for raft to process the proposal and report the outcome.
	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}
   497  
   498  func (n *node) Ready() <-chan Ready { return n.readyc }
   499  
// Advance notifies the run goroutine that the application has saved progress
// up to the last Ready, allowing the next Ready to be emitted.
// Returns without effect if the node has been stopped.
func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}
   506  
// ApplyConfChange applies a config change (previously passed to
// ProposeConfChange) to the node, returning the resulting ConfState.
// If the node has been stopped, the zero ConfState is returned.
func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc.AsV2():
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}
   519  
   520  func (n *node) Status() Status {
   521  	c := make(chan Status)
   522  	select {
   523  	case n.status <- c:
   524  		return <-c
   525  	case <-n.done:
   526  		return Status{}
   527  	}
   528  }
   529  
   530  func (n *node) ReportUnreachable(id uint64) {
   531  	select {
   532  	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
   533  	case <-n.done:
   534  	}
   535  }
   536  
   537  func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
   538  	rej := status == SnapshotFailure
   539  
   540  	select {
   541  	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
   542  	case <-n.done:
   543  	}
   544  }
   545  
// TransferLeadership attempts to transfer leadership from lead to transferee.
// Returns without effect if the node stops or ctx is canceled before the
// request can be delivered.
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	// manually set 'from' and 'to', so that leader can voluntarily transfers its leadership
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}
   554  
   555  func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
   556  	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
   557  }
   558  
// newReady assembles a Ready from the raft instance's pending work: unstable
// log entries to persist, newly committed entries to apply, and outbound
// messages. Soft and hard state are included only when they differ from
// prevSoftSt/prevHardSt, matching the "no update" conventions documented on
// the Ready type.
func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
	rd := Ready{
		Entries:          r.raftLog.unstableEntries(),
		CommittedEntries: r.raftLog.nextEnts(),
		Messages:         r.msgs,
	}
	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
		rd.SoftState = softSt
	}
	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
		rd.HardState = hardSt
	}
	if r.raftLog.unstable.snapshot != nil {
		rd.Snapshot = *r.raftLog.unstable.snapshot
	}
	if len(r.readStates) != 0 {
		rd.ReadStates = r.readStates
	}
	rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
	return rd
}
   580  
   581  // MustSync returns true if the hard state and count of Raft entries indicate
   582  // that a synchronous write to persistent storage is required.
   583  func MustSync(st, prevst pb.HardState, entsnum int) bool {
   584  	// Persistent state on all servers:
   585  	// (Updated on stable storage before responding to RPCs)
   586  	// currentTerm
   587  	// votedFor
   588  	// log entries[]
   589  	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
   590  }
   591  

View as plain text