diff --git a/go.mod b/go.mod
index da9858d9ab..6cd6a65f88 100644
--- a/go.mod
+++ b/go.mod
@@ -39,8 +39,7 @@ require (
 	github.com/hashicorp/go-connlimit v0.3.0
 	github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088
 	github.com/hashicorp/go-hclog v0.12.0
-	github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
-	github.com/hashicorp/go-memdb v1.1.0
+	github.com/hashicorp/go-memdb v1.3.0
 	github.com/hashicorp/go-msgpack v0.5.5
 	github.com/hashicorp/go-multierror v1.1.0
 	github.com/hashicorp/go-raftchunking v0.6.1
diff --git a/go.sum b/go.sum
index 068f3d8aaf..89f4dad40f 100644
--- a/go.sum
+++ b/go.sum
@@ -235,12 +235,11 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
 github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
 github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
-github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
-github.com/hashicorp/go-memdb v1.1.0 h1:ClvpUXpBA6UDs5+vc1h3wqe4UJU+rwum7CU219SeCbk=
-github.com/hashicorp/go-memdb v1.1.0/go.mod h1:LWQ8R70vPrS4OEY9k28D2z8/Zzyu34NVzeRibGAzHO0=
+github.com/hashicorp/go-memdb v1.3.0 h1:xdXq34gBOMEloa9rlGStLxmfX/dyIK8htOv36dQUwHU=
+github.com/hashicorp/go-memdb v1.3.0/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
index a967ae456d..6331af921b 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -1,4 +1,16 @@
-# UNRELEASED
+# 1.3.0 (September 17th, 2020)
+
+FEATURES
+
+* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
+
+# 1.2.0 (March 18th, 2020)
+
+FEATURES
+
+* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
+
+# 1.1.0 (May 22nd, 2019)
 
 FEATURES
 
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
index 1ecaf831c7..cd16d3beae 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -155,7 +155,7 @@ func (i *Iterator) Next() ([]byte, interface{}, bool) {
 	// Initialize our stack if needed
 	if i.stack == nil && i.node != nil {
 		i.stack = []edges{
-			edges{
+			{
 				edge{node: i.node},
 			},
 		}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
index 3ab904edce..3598548087 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/node.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -211,6 +211,12 @@ func (n *Node) Iterator() *Iterator {
 	return &Iterator{node: n}
 }
 
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
 // rawIterator is used to return a raw iterator at the given node to walk the
 // tree.
 func (n *Node) rawIterator() *rawIterator {
@@ -224,6 +230,11 @@ func (n *Node) Walk(fn WalkFn) {
 	recursiveWalk(n, fn)
 }
 
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+	reverseRecursiveWalk(n, fn)
+}
+
 // WalkPrefix is used to walk the tree under a prefix
 func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
 	search := prefix
@@ -302,3 +313,22 @@ func recursiveWalk(n *Node, fn WalkFn) bool {
 	}
 	return false
 }
+
+// reverseRecursiveWalk is used to do a reverse pre-order
+// walk of a node recursively. Returns true if the walk
+// should be aborted
+func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children in reverse order
+	for i := len(n.edges) - 1; i >= 0; i-- {
+		e := n.edges[i]
+		if reverseRecursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
index 04814c1323..3c6a22525c 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -41,7 +41,7 @@ func (i *rawIterator) Next() {
 	// Initialize our stack if needed.
 	if i.stack == nil && i.node != nil {
 		i.stack = []rawStackEntry{
-			rawStackEntry{
+			{
 				edges: edges{
 					edge{node: i.node},
 				},
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
new file mode 100644
index 0000000000..762471bc36
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
@@ -0,0 +1,177 @@
+package iradix
+
+import (
+	"bytes"
+)
+
+// ReverseIterator is used to iterate over a set of nodes
+// in reverse in-order
+type ReverseIterator struct {
+	i *Iterator
+}
+
+// NewReverseIterator returns a new ReverseIterator at a node
+func NewReverseIterator(n *Node) *ReverseIterator {
+	return &ReverseIterator{
+		i: &Iterator{node: n},
+	}
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	return ri.i.SeekPrefixWatch(prefix)
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
+	ri.i.SeekPrefixWatch(prefix)
+}
+
+func (ri *ReverseIterator) recurseMax(n *Node) *Node {
+	// Traverse to the maximum child
+	if n.leaf != nil {
+		return n
+	}
+	if len(n.edges) > 0 {
+		// Add all the other edges to the stack (the max node will be added as
+		// we recurse)
+		m := len(n.edges)
+		ri.i.stack = append(ri.i.stack, n.edges[:m-1])
+		return ri.recurseMax(n.edges[m-1].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekReverseLowerBound is used to seek the iterator to the largest key that is
+// lower or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound.
+	ri.i.stack = []edges{}
+	n := ri.i.node
+	search := key
+
+	found := func(n *Node) {
+		ri.i.node = n
+		ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower bound.
+			// But we are looking in reverse, so the reverse lower bound will be the
+			// largest leaf under this subtree, since it is the value that would come
+			// right before the current search prefix if it were in the tree. So we need
+			// to follow the maximum path in this subtree to find it.
+			n = ri.recurseMax(n)
+			if n != nil {
+				found(n)
+			}
+			return
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger than search prefix, that means there is no reverse lower
+			// bound since nothing comes before our current search prefix.
+			ri.i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf we're done.
+		if n.leaf != nil {
+			if bytes.Compare(n.leaf.key, key) < 0 {
+				ri.i.node = nil
+				return
+			}
+			found(n)
+			return
+		}
+
+		// Consume the search prefix
+		if len(n.prefix) > len(search) {
+			search = []byte{}
+		} else {
+			search = search[len(n.prefix):]
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+
+		// From here, we need to update the stack with all values lower than
+		// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the
+		// previous nodes already.
+		if lbNode == nil {
+			ri.i.node = nil
+			return
+		}
+
+		ri.i.node = lbNode
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if ri.i.stack == nil && ri.i.node != nil {
+		ri.i.stack = []edges{
+			{
+				edge{node: ri.i.node},
+			},
+		}
+	}
+
+	for len(ri.i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(ri.i.stack)
+		last := ri.i.stack[n-1]
+		m := len(last)
+		elem := last[m-1].node
+
+		// Update the stack
+		if m > 1 {
+			ri.i.stack[n-1] = last[:m-1]
+		} else {
+			ri.i.stack = ri.i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			ri.i.stack = append(ri.i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md
index f445a756d5..080b7447b2 100644
--- a/vendor/github.com/hashicorp/go-memdb/README.md
+++ b/vendor/github.com/hashicorp/go-memdb/README.md
@@ -32,7 +32,7 @@ For the underlying immutable radix trees, see [go-immutable-radix](https://githu
 Documentation
 =============
 
-The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb).
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
 
 Example
 =======
diff --git a/vendor/github.com/hashicorp/go-memdb/go.mod b/vendor/github.com/hashicorp/go-memdb/go.mod
index 34bfd82436..242f5fac2d 100644
--- a/vendor/github.com/hashicorp/go-memdb/go.mod
+++ b/vendor/github.com/hashicorp/go-memdb/go.mod
@@ -2,4 +2,7 @@ module github.com/hashicorp/go-memdb
 
 go 1.12
 
-require github.com/hashicorp/go-immutable-radix v1.1.0
+require (
+	github.com/hashicorp/go-immutable-radix v1.3.0
+	github.com/hashicorp/golang-lru v0.5.4 // indirect
+)
diff --git a/vendor/github.com/hashicorp/go-memdb/go.sum b/vendor/github.com/hashicorp/go-memdb/go.sum
index 1a21d603a8..eaff521cec 100644
--- a/vendor/github.com/hashicorp/go-memdb/go.sum
+++ b/vendor/github.com/hashicorp/go-memdb/go.sum
@@ -1,6 +1,8 @@
-github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
-github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
index 604dff7e99..41c392b5f2 100644
--- a/vendor/github.com/hashicorp/go-memdb/index.go
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -428,6 +428,41 @@ func IsUintType(k reflect.Kind) (size int, okay bool) {
 	}
 }
 
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+	Field string
+}
+
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	if k != reflect.Bool {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+	}
+
+	// Get the value and encode it
+	buf := make([]byte, 1)
+	if fv.Bool() {
+		buf[0] = 1
+	}
+
+	return true, buf, nil
+}
+
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
 // UUIDFieldIndex is used to extract a field from an object
 // using reflection and builds an index on that field by treating
 // it as a UUID. This is an optimization to using a StringFieldIndex
diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go
index 5325d9f1e9..68734e37c8 100644
--- a/vendor/github.com/hashicorp/go-memdb/txn.go
+++ b/vendor/github.com/hashicorp/go-memdb/txn.go
@@ -536,6 +536,34 @@ func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan str
 	return watch, value, nil
 }
 
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel
+func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+	// Get the index value
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+	// Do an exact lookup
+	if indexSchema.Unique && val != nil && indexSchema.Name == index {
+		watch, obj, ok := indexTxn.GetWatch(val)
+		if !ok {
+			return watch, nil, nil
+		}
+		return watch, obj, nil
+	}
+
+	// Handle non-unique index by using an iterator and getting the last value
+	iter := indexTxn.Root().ReverseIterator()
+	watch := iter.SeekPrefixWatch(val)
+	_, value, _ := iter.Previous()
+	return watch, value, nil
+}
+
 // First is used to return the first matching object for
 // the given constraints on the index
 func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
@@ -543,6 +571,13 @@ func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, er
 	return val, err
 }
 
+// Last is used to return the last matching object for
+// the given constraints on the index
+func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
+	_, val, err := txn.LastWatch(table, index, args...)
+	return val, err
+}
+
 // LongestPrefix is used to fetch the longest prefix match for the given
 // constraints on the index. Note that this will not work with the memdb
 // StringFieldIndex because it adds null terminators which prevent the
@@ -654,6 +689,26 @@ func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, e
 	return iter, nil
 }
 
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index.
+// The returned ResultIterator's Next() will return the next Previous value
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
 // LowerBound is used to construct a ResultIterator over all the the range of
 // rows that have an index value greater than or equal to the provide args.
 // Calling this then iterating until the rows are larger than required allows
@@ -676,6 +731,29 @@ func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIter
 	return iter, nil
 }
 
+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the
+// provided args. Calling this then iterating until the rows are lower than
+// required allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The WatchCh returned will be nil and so will block
+// forever.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekReverseLowerBound(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
 // objectID is a tuple of table name and the raw internal id byte slice
 // converted to a string. It's only converted to a string to make it comparable
 // so this struct can be used as a map index.
@@ -744,6 +822,15 @@ func (txn *Txn) Changes() Changes {
 			// case it's different. Note that m is not a pointer so we are not
 			// modifying the txn.changeSet here - it's already a copy.
 			m.Before = mi.firstBefore
+
+			// Edge case - if the object was inserted and then eventually deleted in
+			// the same transaction, then the net effect on that key is a no-op. Don't
+			// emit a mutation with nil for before and after as it's meaningless and
+			// might violate expectations and cause a panic in code that assumes at
+			// least one must be set.
+			if m.Before == nil && m.After == nil {
+				continue
+			}
 			cs = append(cs, m)
 		}
 	}
@@ -768,6 +855,22 @@ func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*ira
 	return indexIter, val, nil
 }
 
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.ReverseIterator()
+	return indexIter, val, nil
+}
+
 // Defer is used to push a new arbitrary function onto a stack which
 // gets called when a transaction is committed and finished. Deferred
 // functions are called in LIFO order, and only invoked at the end of
@@ -795,3 +898,43 @@ func (r *radixIterator) Next() interface{} {
 	}
 	return value
 }
+
+type radixReverseIterator struct {
+	iter    *iradix.ReverseIterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() interface{} {
+	_, value, ok := r.iter.Previous()
+	if !ok {
+		return nil
+	}
+	return value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+	if txn.rootTxn == nil {
+		return nil
+	}
+
+	snapshot := &Txn{
+		db:      txn.db,
+		rootTxn: txn.rootTxn.Clone(),
+	}
+
+	// Commit sub-transactions into the snapshot
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		snapshot.rootTxn.Insert(path, final)
+	}
+
+	return snapshot
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
index a6f01213be..7de78a1298 100644
--- a/vendor/github.com/hashicorp/go-memdb/watch.go
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -127,3 +127,18 @@ func (w WatchSet) watchMany(ctx context.Context) error {
 		return ctx.Err()
 	}
 }
+
+// WatchCh returns a channel that is used to wait for either the watch set to trigger
+// or for the context to be cancelled. WatchCh creates a new goroutine each call, so
+// callers may need to cache the returned channel to avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+	// Create the outgoing channel
+	triggerCh := make(chan error, 1)
+
+	// Create a goroutine to collect the error from WatchCtx
+	go func() {
+		triggerCh <- w.WatchCtx(ctx)
+	}()
+
+	return triggerCh
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f1d06d10de..dc43c7bbcb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -229,9 +229,9 @@ github.com/hashicorp/go-discover/provider/triton
 github.com/hashicorp/go-discover/provider/vsphere
 # github.com/hashicorp/go-hclog v0.12.0
 github.com/hashicorp/go-hclog
-# github.com/hashicorp/go-immutable-radix v1.2.0
+# github.com/hashicorp/go-immutable-radix v1.3.0
 github.com/hashicorp/go-immutable-radix
-# github.com/hashicorp/go-memdb v1.1.0
+# github.com/hashicorp/go-memdb v1.3.0
 github.com/hashicorp/go-memdb
 # github.com/hashicorp/go-msgpack v0.5.5
 github.com/hashicorp/go-msgpack/codec
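Reviewer note (not part of the patch): a minimal usage sketch of the reverse-iteration APIs this bump vendors in, Txn.Last, Txn.GetReverse, and Txn.ReverseLowerBound, plus the new BoolFieldIndex, assuming the go-memdb v1.3.0 code above. The Person type, the "people" table, and the sample values are hypothetical and exist only for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-memdb"
)

// Person is a hypothetical record type used only for this sketch.
type Person struct {
	Name   string
	Active bool
}

func main() {
	// Schema: a unique "id" index on Name plus an "active" index using the
	// BoolFieldIndex added in go-memdb v1.3.0.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"people": {
				Name: "people",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "Name"},
					},
					"active": {
						Name:    "active",
						Indexer: &memdb.BoolFieldIndex{Field: "Active"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	// Insert a few rows in a write transaction.
	write := db.Txn(true)
	for _, p := range []*Person{{"alice", true}, {"bob", false}, {"carol", true}} {
		if err := write.Insert("people", p); err != nil {
			panic(err)
		}
	}
	write.Commit()

	read := db.Txn(false)
	defer read.Abort()

	// Last mirrors First but returns the highest-sorted entry in the
	// index ("carol" here).
	last, err := read.Last("people", "id")
	if err != nil {
		panic(err)
	}
	fmt.Println("last:", last.(*Person).Name)

	// GetReverse walks the whole index from the highest key to the lowest.
	it, err := read.GetReverse("people", "id")
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println("reverse:", obj.(*Person).Name)
	}

	// ReverseLowerBound starts at the largest key <= the given bound and
	// iterates downward ("bob", then "alice" for a bound of "bzzz").
	rlb, err := read.ReverseLowerBound("people", "id", "bzzz")
	if err != nil {
		panic(err)
	}
	for obj := rlb.Next(); obj != nil; obj = rlb.Next() {
		fmt.Println("<= bzzz:", obj.(*Person).Name)
	}
}
```

As the txn.go comment above notes, the iterator returned by ReverseLowerBound cannot be watched (its WatchCh is nil), unlike the one returned by GetReverse, which carries the prefix watch channel.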