mirror of https://github.com/hashicorp/consul
Daniel Nephin
4 years ago
7 changed files with 373 additions and 15 deletions
@ -0,0 +1,36 @@
|
||||
/* |
||||
Package mutex implements the sync.Locker interface using x/sync/semaphore. It |
||||
may be used as a replacement for sync.Mutex when one or more goroutines need to |
||||
allow their calls to Lock to be cancelled by context cancellation. |
||||
*/ |
||||
package mutex |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"golang.org/x/sync/semaphore" |
||||
) |
||||
|
||||
// Mutex implements sync.Locker on top of a weighted semaphore with a
// capacity of one, so that Lock may also be attempted with a cancellable
// context via TryLock.
type Mutex semaphore.Weighted
||||
// New returns a Mutex that is ready for use.
|
||||
func New() *Mutex { |
||||
return (*Mutex)(semaphore.NewWeighted(1)) |
||||
} |
||||
|
||||
func (m *Mutex) Lock() { |
||||
_ = (*semaphore.Weighted)(m).Acquire(context.Background(), 1) |
||||
} |
||||
|
||||
func (m *Mutex) Unlock() { |
||||
(*semaphore.Weighted)(m).Release(1) |
||||
} |
||||
|
||||
// TryLock acquires the mutex, blocking until it is available or ctx is
// done. On success, returns nil. On failure, returns ctx.Err() and leaves
// the mutex unchanged.
//
// If ctx is already done, TryLock may still succeed without blocking.
func (m *Mutex) TryLock(ctx context.Context) error {
	return (*semaphore.Weighted)(m).Acquire(ctx, 1)
}
@ -0,0 +1,93 @@
|
||||
package mutex |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestMutex(t *testing.T) { |
||||
t.Run("starts unlocked", func(t *testing.T) { |
||||
m := New() |
||||
canLock(t, m) |
||||
}) |
||||
|
||||
t.Run("Lock blocks when locked", func(t *testing.T) { |
||||
m := New() |
||||
m.Lock() |
||||
lockIsBlocked(t, m) |
||||
}) |
||||
|
||||
t.Run("Unlock unblocks Lock", func(t *testing.T) { |
||||
m := New() |
||||
m.Lock() |
||||
m.Unlock() // nolint:staticcheck // SA2001 is not relevant here
|
||||
canLock(t, m) |
||||
}) |
||||
|
||||
t.Run("TryLock acquires lock", func(t *testing.T) { |
||||
m := New() |
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second) |
||||
t.Cleanup(cancel) |
||||
require.NoError(t, m.TryLock(ctx)) |
||||
lockIsBlocked(t, m) |
||||
}) |
||||
|
||||
t.Run("TryLock blocks until timeout when locked", func(t *testing.T) { |
||||
m := New() |
||||
m.Lock() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) |
||||
t.Cleanup(cancel) |
||||
err := m.TryLock(ctx) |
||||
require.Equal(t, err, context.DeadlineExceeded) |
||||
}) |
||||
|
||||
t.Run("TryLock acquires lock before timeout", func(t *testing.T) { |
||||
m := New() |
||||
m.Lock() |
||||
|
||||
go func() { |
||||
time.Sleep(20 * time.Millisecond) |
||||
m.Unlock() |
||||
}() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second) |
||||
t.Cleanup(cancel) |
||||
err := m.TryLock(ctx) |
||||
require.NoError(t, err) |
||||
}) |
||||
|
||||
} |
||||
|
||||
func canLock(t *testing.T, m *Mutex) { |
||||
t.Helper() |
||||
chDone := make(chan struct{}) |
||||
go func() { |
||||
m.Lock() |
||||
close(chDone) |
||||
}() |
||||
|
||||
select { |
||||
case <-chDone: |
||||
case <-time.After(20 * time.Millisecond): |
||||
t.Fatal("failed to acquire lock before timeout") |
||||
} |
||||
} |
||||
|
||||
func lockIsBlocked(t *testing.T, m *Mutex) { |
||||
t.Helper() |
||||
chDone := make(chan struct{}) |
||||
go func() { |
||||
m.Lock() |
||||
close(chDone) |
||||
}() |
||||
|
||||
select { |
||||
case <-chDone: |
||||
t.Fatal("expected Lock to block") |
||||
case <-time.After(20 * time.Millisecond): |
||||
} |
||||
} |
@ -0,0 +1,136 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package semaphore provides a weighted semaphore implementation.
|
||||
package semaphore // import "golang.org/x/sync/semaphore"
|
||||
|
||||
import ( |
||||
"container/list" |
||||
"context" |
||||
"sync" |
||||
) |
||||
|
||||
// waiter records a single blocked Acquire call: the weight it requested
// and the channel used to signal it once that weight has been granted.
type waiter struct {
	n     int64
	ready chan<- struct{} // Closed when semaphore acquired.
}
||||
|
||||
// NewWeighted creates a new weighted semaphore with the given
|
||||
// maximum combined weight for concurrent access.
|
||||
func NewWeighted(n int64) *Weighted { |
||||
w := &Weighted{size: n} |
||||
return w |
||||
} |
||||
|
||||
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight that may be held at once
	cur     int64      // combined weight currently held
	mu      sync.Mutex // guards cur and waiters
	waiters list.List  // FIFO queue of blocked Acquire calls; element values are waiter
}
||||
|
||||
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: enough free weight and nobody already queued. The
	// waiters.Len() check preserves FIFO fairness — a newcomer must not
	// jump ahead of goroutines that are already blocked.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	// Slow path: enqueue ourselves and wait to be notified or cancelled.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		// Re-check ready under the lock: notifyWaiters may have granted us
		// the semaphore concurrently with the cancellation.
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we're at the front and there're extra tokens left, notify other waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err

	case <-ready:
		return nil
	}
}
||||
|
||||
// TryAcquire acquires the semaphore with a weight of n without blocking.
|
||||
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
|
||||
func (s *Weighted) TryAcquire(n int64) bool { |
||||
s.mu.Lock() |
||||
success := s.size-s.cur >= n && s.waiters.Len() == 0 |
||||
if success { |
||||
s.cur += n |
||||
} |
||||
s.mu.Unlock() |
||||
return success |
||||
} |
||||
|
||||
// Release releases the semaphore with a weight of n.
|
||||
func (s *Weighted) Release(n int64) { |
||||
s.mu.Lock() |
||||
s.cur -= n |
||||
if s.cur < 0 { |
||||
s.mu.Unlock() |
||||
panic("semaphore: released more than held") |
||||
} |
||||
s.notifyWaiters() |
||||
s.mu.Unlock() |
||||
} |
||||
|
||||
func (s *Weighted) notifyWaiters() { |
||||
for { |
||||
next := s.waiters.Front() |
||||
if next == nil { |
||||
break // No more waiters blocked.
|
||||
} |
||||
|
||||
w := next.Value.(waiter) |
||||
if s.size-s.cur < w.n { |
||||
// Not enough tokens for the next waiter. We could keep going (to try to
|
||||
// find a waiter with a smaller request), but under load that could cause
|
||||
// starvation for large requests; instead, we leave all remaining waiters
|
||||
// blocked.
|
||||
//
|
||||
// Consider a semaphore used as a read-write lock, with N tokens, N
|
||||
// readers, and one writer. Each reader can Acquire(1) to obtain a read
|
||||
// lock. The writer can Acquire(N) to obtain a write lock, excluding all
|
||||
// of the readers. If we allow the readers to jump ahead in the queue,
|
||||
// the writer will starve — there is always one token available for every
|
||||
// reader.
|
||||
break |
||||
} |
||||
|
||||
s.cur += w.n |
||||
s.waiters.Remove(next) |
||||
close(w.ready) |
||||
} |
||||
} |
Loading…
Reference in new issue