
20 changed files with 1860 additions and 0 deletions
@@ -0,0 +1,4 @@
# SheepMQ

SheepMQ aims to be a robust message queue library and daemon with a variety of storage backends.

# Still a work in progress

@@ -0,0 +1,34 @@
package lease

import "time"

// Heart is a lease that stays alive as long as heartbeats arrive within the
// given TTL interval.
type Heart struct {
	lastBeat time.Time
	ttl      time.Duration
}

// NewHeart creates a new Heart instance with the given TTL in nanoseconds.
func NewHeart(ttl int64) *Heart {
	ttlDuration := time.Nanosecond * time.Duration(ttl)
	return &Heart{
		lastBeat: time.Now(),
		ttl:      ttlDuration,
	}
}

// Valid beats the heart: it refreshes the lease and returns true if it is
// still alive, or returns false if the lease has already expired.
func (h *Heart) Valid() bool {
	if !h.Check() {
		return false
	}

	// update the last beat to now
	h.lastBeat = time.Now()
	return true
}

// Check reports whether the last heartbeat happened within the TTL, without
// refreshing the lease.
func (h *Heart) Check() bool {
	return time.Since(h.lastBeat) <= h.ttl
}

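// Illustrative usage sketch, not part of this change set: a consumer drives a
// heartbeat lease by calling Valid between units of work; the lease stays
// alive only while calls arrive within the TTL. The package name and the work
// channel are placeholders.
package lease_example

import (
	"time"

	"github.com/tblyler/sheepmq/lease"
)

func consumeWithHeartbeat(work <-chan func()) {
	heart := lease.NewHeart(int64(30 * time.Second)) // TTL is passed in nanoseconds
	for job := range work {
		// Valid refreshes the lease; once a gap longer than the TTL occurs it
		// reports false and the consumer should stop.
		if !heart.Valid() {
			return
		}
		job()
	}
}
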
@@ -0,0 +1,52 @@
package lease

import (
	"testing"
	"time"
)

func TestHeartCheck(t *testing.T) {
	heart := NewHeart(int64(time.Second))

	if !heart.Check() {
		t.Error("Heartbeat check failed first check too soon")
	}

	time.Sleep(time.Second + time.Nanosecond)

	if heart.Check() {
		t.Error("Heartbeat check succeeded after its ttl")
	}
}

func TestHeartValid(t *testing.T) {
	heart := NewHeart(int64(time.Second))

	if !heart.Valid() {
		t.Error("Heartbeat valid failed first check too soon")
	}

	time.Sleep(time.Second / 3)

	if !heart.Valid() {
		t.Error("Heartbeat valid failed second check too soon")
	}

	time.Sleep(time.Second / 2)

	if !heart.Valid() {
		t.Error("Heartbeat valid failed third check too soon")
	}

	time.Sleep(time.Second / 2)

	if !heart.Valid() {
		t.Error("Heartbeat valid failed fourth check too soon")
	}

	time.Sleep(time.Second)

	if heart.Valid() {
		t.Error("Heartbeat valid succeeded after its ttl")
	}
}

@@ -0,0 +1,10 @@
package lease

// Leaser reports whether a given lease is still valid.
type Leaser interface {
	// Valid refreshes the lease (when applicable) and reports whether it is
	// still alive.
	Valid() bool

	// Check reports whether the lease is still alive without refreshing it.
	Check() bool
}

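// Illustrative sketch, not part of this change set: optional compile-time
// checks that each lease implementation in this diff satisfies Leaser. The
// package name is a placeholder.
package lease_example

import "github.com/tblyler/sheepmq/lease"

var (
	_ lease.Leaser = (*lease.Heart)(nil)
	_ lease.Leaser = (*lease.PID)(nil)
	_ lease.Leaser = (*lease.Timeout)(nil)
)
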
@@ -0,0 +1,94 @@
package lease

import (
	"errors"
	"sync"

	"github.com/tblyler/sheepmq/shepard"
)

var (
	// ErrLeased denotes the item is already leased
	ErrLeased = errors.New("item is already leased")

	// ErrNoLeaser denotes no leaser was provided to add a lease
	ErrNoLeaser = errors.New("no leaser provided")
)

// Manager tracks the leases held against item IDs and their validity.
type Manager struct {
	leases map[uint64]Leaser
	locker sync.RWMutex
}

// NewManager creates a new Manager instance
func NewManager() *Manager {
	return &Manager{
		leases: make(map[uint64]Leaser),
	}
}

// AddLease to the manager for the given item. The lease type is chosen from
// the GetInfo: timeout, PID, or heartbeat, in that order of precedence.
func (m *Manager) AddLease(id uint64, info *shepard.GetInfo) error {
	if m.CheckLease(id) {
		return ErrLeased
	}

	var leaser Leaser
	if info.TimeoutLease != nil {
		leaser = NewTimeout(info.TimeoutLease.Ttl)
	} else if info.PidLease != nil {
		leaser = NewPID(int(info.PidLease.Pid))
	} else if info.HeartLease != nil {
		leaser = NewHeart(info.HeartLease.Ttl)
	} else {
		return ErrNoLeaser
	}

	m.locker.Lock()
	m.leases[id] = leaser
	m.locker.Unlock()

	return nil
}

// CheckLease reports whether the lease for the given item is still valid,
// refreshing it in the process and dropping it once it has expired.
func (m *Manager) CheckLease(id uint64) bool {
	// Valid may mutate the lease (a heartbeat records its last beat), so take
	// the write lock rather than a read lock here.
	m.locker.Lock()
	defer m.locker.Unlock()

	lease, exists := m.leases[id]
	if !exists {
		return false
	}

	valid := lease.Valid()
	if !valid {
		// delete the lease since it is no longer valid
		delete(m.leases, id)
	}

	return valid
}

// PruneLeases that fail their checks
func (m *Manager) PruneLeases() {
	deleteKeys := []uint64{}
	m.locker.RLock()

	for key, lease := range m.leases {
		if !lease.Check() {
			deleteKeys = append(deleteKeys, key)
		}
	}

	m.locker.RUnlock()
	m.locker.Lock()
	for _, key := range deleteKeys {
		delete(m.leases, key)
	}

	m.locker.Unlock()
}

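// Illustrative usage sketch, not part of this change set: a Manager guarding
// items handed out under a heartbeat lease. The package name, the 10-second
// TTL, and the pruning interval are placeholders.
package lease_example

import (
	"time"

	"github.com/tblyler/sheepmq/lease"
	"github.com/tblyler/sheepmq/shepard"
)

func leaseItem(manager *lease.Manager, itemID uint64) error {
	// lease the item for as long as heartbeats keep arriving within 10 seconds
	info := &shepard.GetInfo{
		HeartLease: &shepard.HeartbeatLease{Ttl: int64(10 * time.Second)},
	}
	return manager.AddLease(itemID, info)
}

func pruneLoop(manager *lease.Manager) {
	// periodically drop leases whose checks fail
	for range time.Tick(time.Minute) {
		manager.PruneLeases()
	}
}
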
@@ -0,0 +1,134 @@
package lease

import (
	"testing"
	"time"

	"github.com/tblyler/sheepmq/shepard"
)

func TestManagerAddCheckLease(t *testing.T) {
	manager := NewManager()

	info := &shepard.GetInfo{}

	err := manager.AddLease(123, info)
	if err != ErrNoLeaser {
		t.Error("Failed to get ErrNoLeaser on add lease, got", err)
	}

	info.PidLease = &shepard.PidLease{
		// use a bad PID
		Pid: 0,
	}

	err = manager.AddLease(123, info)
	if err != nil {
		t.Error("Failed to add a crappy PID leaser", err)
	}

	err = manager.AddLease(123, info)
	if err != nil {
		t.Error("Failed to add a crappy PID leaser on top of another", err)
	}

	info.PidLease = nil

	info.HeartLease = &shepard.HeartbeatLease{
		Ttl: int64(time.Second / 2),
	}

	err = manager.AddLease(2, info)
	if err != nil {
		t.Error("Failed to add a valid heartbeat lease", err)
	}

	if !manager.CheckLease(2) {
		t.Error("Failed to check for valid heartbeat lease")
	}

	info.HeartLease = nil
	info.TimeoutLease = &shepard.TimeLease{
		Ttl: int64(time.Second),
	}

	err = manager.AddLease(123, info)
	if err != nil {
		t.Error("Failed to add a good timeout lease of 1 second")
	}

	err = manager.AddLease(123, info)
	if err == nil {
		t.Error("Should not be able to add a lease on top of another valid one")
	}

	err = manager.AddLease(124, info)
	if err != nil {
		t.Error("Failed to add a valid lease against a different id", err)
	}

	if !manager.CheckLease(123) {
		t.Error("Failed to make sure valid lease was valid")
	}

	if !manager.CheckLease(124) {
		t.Error("Failed to make sure valid lease was valid")
	}

	time.Sleep(time.Second)

	if manager.CheckLease(123) {
		t.Error("failed to make sure invalid lease was invalid")
	}

	if manager.CheckLease(124) {
		t.Error("failed to make sure invalid lease was invalid")
	}
}

func TestManagerPruneLeases(t *testing.T) {
	manager := NewManager()

	info := &shepard.GetInfo{}
	info.PidLease = &shepard.PidLease{
		Pid: 0,
	}

	err := manager.AddLease(5, info)
	if err != nil {
		t.Error("Failed to add crappy PID lease", err)
	}

	info.PidLease = nil
	info.TimeoutLease = &shepard.TimeLease{
		Ttl: int64(time.Second),
	}

	err = manager.AddLease(2, info)
	if err != nil {
		t.Error("Failed to add valid timeout lease", err)
	}

	info.TimeoutLease = nil
	info.HeartLease = &shepard.HeartbeatLease{
		Ttl: int64(time.Second),
	}

	err = manager.AddLease(1, info)
	if err != nil {
		t.Error("Failed to add valid heartbeat lease", err)
	}

	manager.PruneLeases()

	if len(manager.leases) != 2 {
		t.Errorf("Should have 2 leases left, have %d", len(manager.leases))
	}

	time.Sleep(time.Second)

	manager.PruneLeases()
	if len(manager.leases) != 0 {
		t.Errorf("Should have 0 leases left, have %d", len(manager.leases))
	}
}

@@ -0,0 +1,34 @@
package lease

import (
	"os"
	"syscall"
)

// PID contains a lease that is valid until the given PID no longer exists
type PID struct {
	pid int
}

// NewPID creates a new PID leaser instance
func NewPID(pid int) *PID {
	return &PID{
		pid: pid,
	}
}

// Valid checks if the PID still exists
func (p *PID) Valid() bool {
	return p.Check()
}

// Check if the PID still exists
func (p *PID) Check() bool {
	process, err := os.FindProcess(p.pid)
	if err != nil {
		return false
	}

	// signal 0 performs an existence check without delivering a signal;
	// a nil error means the PID still exists
	return process.Signal(syscall.Signal(0)) == nil
}

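// Illustrative sketch, not part of this change set: a PID lease can be tied to
// the lifetime of the calling process itself. The package name is a placeholder.
package lease_example

import (
	"os"

	"github.com/tblyler/sheepmq/lease"
)

func selfLease() *lease.PID {
	// the lease stays valid for as long as this process is running
	return lease.NewPID(os.Getpid())
}
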
@@ -0,0 +1,31 @@
package lease

import (
	"os/exec"
	"testing"
)

func TestPIDValid(t *testing.T) {
	// create a sleep command for 1 second
	cmd := exec.Command("sleep", "1")
	err := cmd.Start()
	if err != nil {
		t.Fatal("Failed to create a sleep process:", err)
	}

	pid := NewPID(cmd.Process.Pid)
	if !pid.Valid() {
		t.Error("PID died too soon")
	}

	cmd.Wait()

	if pid.Valid() {
		t.Error("PID didn't die somehow")
	}

	pid = NewPID(-1)
	if pid.Valid() {
		t.Error("Negative PIDs are not a thing")
	}
}

@@ -0,0 +1,27 @@
package lease

import (
	"time"
)

// Timeout is a lease that expires after a fixed TTL.
type Timeout struct {
	eol time.Time
}

// NewTimeout creates a new Timeout instance with the given TTL in nanoseconds.
func NewTimeout(ttl int64) *Timeout {
	return &Timeout{
		eol: time.Now().Add(time.Nanosecond * time.Duration(ttl)),
	}
}

// Valid reports whether the timeout has not yet been reached.
func (t *Timeout) Valid() bool {
	return t.Check()
}

// Check reports whether the timeout has not yet been reached.
func (t *Timeout) Check() bool {
	return time.Now().Before(t.eol)
}

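// Illustrative sketch, not part of this change set: a timeout lease simply
// expires a fixed interval after it is created, regardless of any activity.
// The package name and the five-minute TTL are placeholders.
package lease_example

import (
	"time"

	"github.com/tblyler/sheepmq/lease"
)

func fiveMinuteLease() *lease.Timeout {
	// valid for five minutes from now
	return lease.NewTimeout(int64(5 * time.Minute))
}
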
@@ -0,0 +1,26 @@
package lease

import (
	"testing"
	"time"
)

func TestTimeoutValid(t *testing.T) {
	timeout := NewTimeout(int64(time.Second))

	if !timeout.Valid() {
		t.Error("timeout valid failed too soon")
	}

	time.Sleep(time.Nanosecond * 100)

	if !timeout.Valid() {
		t.Error("timeout valid failed too soon")
	}

	time.Sleep(time.Second - (time.Nanosecond * 100))

	if timeout.Valid() {
		t.Error("timeout valid should not be succeeding")
	}
}

@@ -0,0 +1,52 @@
package main

import (
	"fmt"
	"net"

	"google.golang.org/grpc"

	flag "github.com/spf13/pflag"
	"github.com/tblyler/sheepmq/sheepmq"
	"github.com/tblyler/sheepmq/shepard"
)

func main() {
	var port uint16
	var listenAddr string

	flag.StringVarP(&listenAddr, "addr", "h", "", "The address to listen on")
	flag.Uint16VarP(&port, "port", "p", 0, "The port to bind")

	flag.Parse()

	listenAddr = fmt.Sprintf("%s:%d", listenAddr, port)

	listener, err := net.Listen("tcp", listenAddr)
	if err != nil {
		fmt.Printf("Failed to listen on %s: %s\n", listenAddr, err)
		return
	}

	defer listener.Close()

	fmt.Println("Listening on", listener.Addr())

	grpcServer := grpc.NewServer()

	sheepmqServer, err := sheepmq.NewSheepMQ()
	if err != nil {
		fmt.Println("Failed to open sheepmq server:", err)
		return
	}

	// wrap the queue logic in its gRPC adapter and register it
	sheepmqGServer := sheepmq.NewGServer(sheepmqServer)
	shepard.RegisterSheepmqServer(grpcServer, sheepmqGServer)

	grpcServer.Serve(listener)
}

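// Illustrative invocation, not part of this change set; the binary name
// "sheepmqd" is a placeholder for whatever this package is built as:
//
//	go build -o sheepmqd .
//	./sheepmqd --addr 127.0.0.1 --port 7500
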
@@ -0,0 +1,157 @@
package queue

import (
	"os"
	"sync/atomic"

	"github.com/dgraph-io/badger/badger"
	"github.com/golang/protobuf/proto"
	"github.com/tblyler/sheepmq/lease"
	"github.com/tblyler/sheepmq/shepard"
)

// Badger uses the Badger KV store as a queue backend
type Badger struct {
	opts      *Options
	bopts     *badger.Options
	kv        *badger.KV
	currentID uint64
	idconv    *idConverter
	leases    *lease.Manager
}

// NewBadger creates a new instance of a Badger-backed Queue
func NewBadger(opts *Options) (*Badger, error) {
	// use default badger options if none provided
	var bopts badger.Options
	if opts.BadgerOptions == nil {
		bopts = badger.DefaultOptions
	} else {
		bopts = *opts.BadgerOptions
	}

	// make sure the directory exists
	err := os.MkdirAll(opts.Dir, defaultFilePerm)
	if err != nil {
		return nil, err
	}

	// always honor Options' dir setting over badger.Options' dir settings
	bopts.Dir = opts.Dir
	bopts.ValueDir = opts.Dir

	// try to open a new badger key-value instance with the given options
	kv, err := badger.NewKV(&bopts)
	if err != nil {
		return nil, err
	}

	var currentID uint64

	iter := kv.NewIterator(badger.IteratorOptions{
		PrefetchSize: 5,
		FetchValues:  false,
		Reverse:      true,
	})

	defer iter.Close()

	for iter.Rewind(); iter.Valid(); iter.Next() {
		currentID, err = byteToID(iter.Item().Key())
		if err == nil {
			break
		}

		// try to delete invalid entries
		kv.Delete(iter.Item().Key())
		currentID = 0
	}

	return &Badger{
		opts:      opts,
		bopts:     &bopts,
		kv:        kv,
		currentID: currentID,
		idconv:    newIDConverter(),
		leases:    lease.NewManager(),
	}, nil
}

// Close the internal key value store
func (b *Badger) Close() error {
	return b.kv.Close()
}

// getID returns the next available ID atomically
func (b *Badger) getID() uint64 {
	return atomic.AddUint64(&b.currentID, 1)
}

// AddItem to the queue
func (b *Badger) AddItem(item *shepard.Item) error {
	item.Id = b.getID()

	data, err := proto.Marshal(item)
	if err != nil {
		return err
	}

	byteID := b.idconv.idToByte(item.Id)
	defer b.idconv.put(byteID)

	return b.kv.Set(byteID, data)
}

// GetItem from the queue
func (b *Badger) GetItem(info *shepard.GetInfo, itemChan chan<- *shepard.Item) error {
	iter := b.kv.NewIterator(badger.IteratorOptions{
		PrefetchSize: 500,
		FetchValues:  true,
		Reverse:      false,
	})

	defer iter.Close()

	var count uint64
	for iter.Rewind(); iter.Valid() && count < info.Count; iter.Next() {
		item := iter.Item()
		id, err := byteToID(item.Key())
		if err != nil {
			// try to delete bad keys (don't care about failures)
			b.kv.Delete(item.Key())
			continue
		}

		err = b.leases.AddLease(id, info)
		if err == nil || err == lease.ErrNoLeaser {
			ret := &shepard.Item{}
			err = proto.Unmarshal(item.Value(), ret)
			if err != nil {
				// delete bad values
				b.kv.Delete(item.Key())
				continue
			}

			count++
			itemChan <- ret
		}
	}

	return nil
}

// DelItem from the queue
func (b *Badger) DelItem(info *shepard.DelInfo) error {
	var err error
	for _, id := range info.GetIds() {
		idByte := b.idconv.idToByte(id)
		err = b.kv.Delete(idByte)
		if err != nil {
			return err
		}

		b.idconv.put(idByte)
	}

	return nil
}

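// Illustrative usage sketch, not part of this change set. The directory path
// and the 30-second timeout lease are placeholders; GetItem delivers items on
// the provided channel, so it should be buffered (or drained concurrently) as
// in the accompanying tests.
package queue_example

import (
	"time"

	"github.com/tblyler/sheepmq/queue"
	"github.com/tblyler/sheepmq/shepard"
)

func pushAndPull() error {
	q, err := queue.NewBadger(&queue.Options{Dir: "/tmp/sheepmq-example"})
	if err != nil {
		return err
	}
	defer q.Close()

	// enqueue one item
	if err := q.AddItem(&shepard.Item{Data: []byte("hello"), Queue: "example"}); err != nil {
		return err
	}

	// dequeue it under a 30-second timeout lease
	items := make(chan *shepard.Item, 1)
	info := &shepard.GetInfo{
		Count:        1,
		TimeoutLease: &shepard.TimeLease{Ttl: int64(30 * time.Second)},
	}
	return q.GetItem(info, items)
}
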
@@ -0,0 +1,233 @@
package queue

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"github.com/dgraph-io/badger/badger"
	"github.com/tblyler/sheepmq/shepard"
)

func TestNewBadger(t *testing.T) {
	// test a bad dir setting
	opts := &Options{
		Dir: "",
	}

	_, err := NewBadger(opts)
	if err == nil {
		t.Error("Failed to get error on bad Dir setting")
	}

	opts.Dir = filepath.Join(os.TempDir(), "NewBadgerTest")

	// ensure clean directory
	os.RemoveAll(opts.Dir)
	defer os.RemoveAll(opts.Dir)

	b, err := NewBadger(opts)
	if err != nil {
		t.Fatal("Failed to create badger with default options")
	}

	if b.currentID != 0 {
		t.Error("Current ID of an empty badger db should be 0 not", b.currentID)
	}

	err = b.AddItem(&shepard.Item{})
	if err != nil {
		t.Fatal("Failed to add empty item", err)
	}

	if b.currentID != 1 {
		t.Error("Current ID of a one item db should be 1 not", b.currentID)
	}

	b.Close()

	opts.BadgerOptions = &badger.Options{}

	// make sure custom badger options are honored
	*opts.BadgerOptions = badger.DefaultOptions
	opts.BadgerOptions.SyncWrites = !opts.BadgerOptions.SyncWrites

	b, err = NewBadger(opts)
	if err != nil {
		t.Fatal("Failed to use custom badger options", err)
	}

	defer b.Close()

	if b.bopts.SyncWrites != opts.BadgerOptions.SyncWrites {
		t.Error("Failed to use custom badger options")
	}

	if b.currentID != 1 {
		t.Error("current id != 1 got", b.currentID)
	}
}

func TestBadgerAddGetItem(t *testing.T) {
	items := make([]*shepard.Item, 32)

	for i := range items {
		items[i] = &shepard.Item{}

		items[i].Data = make([]byte, 256*(i+1))
		rand.Read(items[i].Data)
		items[i].Ctime = time.Now().Unix()
		items[i].Queue = fmt.Sprint("testing", i)
		items[i].Stats = map[string]int64{
			"cool":    133333337,
			"notcool": 0,
			"#1":      1,
			"datSize": int64(len(items[i].Data)),
		}
	}

	opts := &Options{
		Dir: filepath.Join(os.TempDir(), "TestBadgerAddGetItem"),
	}

	os.RemoveAll(opts.Dir)
	defer os.RemoveAll(opts.Dir)

	b, err := NewBadger(opts)
	if err != nil {
		t.Fatal("Failed to open badger", err)
	}

	defer b.Close()

	for i, item := range items {
		err = b.AddItem(item)
		if err != nil {
			t.Error("Failed to add item", i, err)
		}
	}

	itemChan := make(chan *shepard.Item, len(items))
	err = b.GetItem(&shepard.GetInfo{
		Count: uint64(len(items)),
	}, itemChan)

	close(itemChan)
	if err != nil {
		t.Error("Failed to get items", err)
	}

	i := 0
	for item := range itemChan {
		if item.Ctime != items[i].Ctime {
			t.Error("item ctimes", item.Ctime, items[i].Ctime)
		}

		if item.Queue != items[i].Queue {
			t.Error("item queues", item.Queue, items[i].Queue)
		}

		if !bytes.Equal(item.Data, items[i].Data) {
			t.Error("item data", item.Data, items[i].Data)
		}

		if !reflect.DeepEqual(item.Stats, items[i].Stats) {
			t.Error("item stats", item.Stats, items[i].Stats)
		}

		i++
	}

	if i != len(items) {
		t.Error("got", i, "items expected", len(items))
	}
}

func TestBadgerDelItem(t *testing.T) {
	opts := &Options{
		Dir: filepath.Join(os.TempDir(), "TestBadgerAddGetItem"),
	}

	os.RemoveAll(opts.Dir)
	defer os.RemoveAll(opts.Dir)

	b, err := NewBadger(opts)
	if err != nil {
		t.Fatal("Failed to start badger", err)
	}

	items := make([]*shepard.Item, 32)
	for i := range items {
		items[i] = &shepard.Item{
			Ctime: time.Now().Unix(),
			Data:  make([]byte, 256*(i+1)),
			Queue: "The queue",
			Stats: map[string]int64{
				"lol":       10101010101,
				"index":     int64(i),
				"datasize:": int64(256 * (i + 1)),
			},
		}
		rand.Read(items[i].Data)

		err = b.AddItem(items[i])
		if err != nil {
			t.Error("Failed to add item", i, err)
		}
	}

	delinfo := &shepard.DelInfo{}
	for i := range items {
		if i%2 == 0 {
			continue
		}

		delinfo.Ids = append(delinfo.Ids, uint64(i))
	}

	err = b.DelItem(delinfo)
	if err != nil {
		t.Error("Failed to delete", delinfo.Ids, err)
	}

	getinfo := &shepard.GetInfo{
		Count: uint64(len(items) - len(delinfo.Ids)),
	}
	getChan := make(chan *shepard.Item, getinfo.Count)

	err = b.GetItem(getinfo, getChan)
	if err != nil {
		t.Error("Failed to get items", err)
	}

	close(getChan)

	i := 1
	for item := range getChan {
		if item.Ctime != items[i].Ctime {
			t.Error("item ctimes", item.Ctime, items[i].Ctime)
		}

		if item.Queue != items[i].Queue {
			t.Error("item queues", item.Queue, items[i].Queue)
		}

		if !bytes.Equal(item.Data, items[i].Data) {
			t.Error("item data", item.Data, items[i].Data)
		}

		if !reflect.DeepEqual(item.Stats, items[i].Stats) {
			t.Error("item stats", item.Stats, items[i].Stats)
		}
		i += 2
	}

	if i != len(items)+1 {
		t.Error("only looped to item index", i)
	}
}

@@ -0,0 +1,70 @@
package queue

import (
	"encoding/binary"
	"fmt"
	"os"
	"sync"

	"github.com/dgraph-io/badger/badger"
	"github.com/tblyler/sheepmq/shepard"
)

const (
	idByteSize      = 8
	defaultFilePerm = os.FileMode(0700)
)

// Queue defines a resource to store queue items
type Queue interface {
	AddItem(*shepard.Item) error
	GetItem(*shepard.GetInfo, chan<- *shepard.Item) error
	DelItem(*shepard.DelInfo) error
}

// Options to be used when creating a new Queue
type Options struct {
	// Directory to store queue data
	Dir string

	// Badger queue specific options
	BadgerOptions *badger.Options
}

// idConverter converts uint64 IDs to and from little-endian byte slices,
// recycling the slices through a pool.
type idConverter struct {
	pool sync.Pool
}

func newIDConverter() *idConverter {
	return &idConverter{
		pool: sync.Pool{
			New: func() interface{} {
				return make([]byte, idByteSize)
			},
		},
	}
}

func (i *idConverter) idToByte(id uint64) []byte {
	buf := i.pool.Get().([]byte)

	binary.LittleEndian.PutUint64(buf, id)

	return buf
}

func (i *idConverter) put(data []byte) {
	i.pool.Put(data)
}

func byteToID(data []byte) (uint64, error) {
	if len(data) < idByteSize {
		return 0, fmt.Errorf(
			"unable to convert byte slice length of %d, need at least %d",
			len(data),
			idByteSize,
		)
	}

	return binary.LittleEndian.Uint64(data), nil
}

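// Illustrative sketch, not part of this change set: an optional compile-time
// check that the Badger backend satisfies Queue, assuming the channel-based
// GetItem signature as implemented by Badger above. The package name is a
// placeholder.
package queue_example

import "github.com/tblyler/sheepmq/queue"

var _ queue.Queue = (*queue.Badger)(nil)
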
@@ -0,0 +1,67 @@
package queue

import (
	"bytes"
	"math"
	"reflect"
	"testing"
)

func TestIDConverter(t *testing.T) {
	idconv := newIDConverter()

	idByte := idconv.idToByte(math.MaxUint64)

	// "byte" is a predeclared identifier, so call the loop variable "bite"
	for _, bite := range idByte {
		if bite != byte(255) {
			t.Error("maxuint64 is not all max bytes", idByte)
			break
		}
	}

	idconv.put(idByte)

	firstAddress := reflect.ValueOf(idByte).Pointer()

	idByte = idconv.idToByte(0)
	secondAddress := reflect.ValueOf(idByte).Pointer()

	if firstAddress != secondAddress {
		t.Error("Failed to use byte pool")
	}

	for _, bite := range idByte {
		if bite != 0 {
			t.Error("zero should be all zero bytes", idByte)
			break
		}
	}

	idconv.put(idByte)

	id := uint64(582348138342)
	idByte = idconv.idToByte(id)
	knownByte := []byte{
		102, 103, 167, 150, 135, 0, 0, 0,
	}

	if !bytes.Equal(idByte, knownByte) {
		t.Error("Failed to encode id, expected", knownByte, "got", idByte)
	}

	idconv.put(idByte)

	newID, err := byteToID(knownByte)
	if err != nil {
		t.Error("error converting byte to id", err)
	}
	if newID != id {
		t.Error("expected id", id, "got", newID)
	}

	_, err = byteToID([]byte{1, 2, 3, 4, 5})
	if err == nil {
		t.Error("Failed to get error for bad byte to id data")
	}
}

@@ -0,0 +1,74 @@
package sheepmq

import (
	"io"

	"github.com/tblyler/sheepmq/shepard"

	context "golang.org/x/net/context"
)

// GServer encapsulates all sheepmq gRPC server activity
type GServer struct {
	sheepmq *SheepMQ
}

// NewGServer creates a new sheepmq gRPC server instance
func NewGServer(sheepmq *SheepMQ) *GServer {
	return &GServer{
		sheepmq: sheepmq,
	}
}

// AddItem to sheepmq's queue
func (l *GServer) AddItem(stream shepard.Sheepmq_AddItemServer) error {
	for {
		item, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		resp, _ := l.sheepmq.AddItem(item)
		err = stream.Send(resp)
		if err != nil {
			return err
		}

		if !resp.GetSuccess() {
			return nil
		}
	}
}

// GetItem from sheepmq's queue
func (l *GServer) GetItem(info *shepard.GetInfo, stream shepard.Sheepmq_GetItemServer) error {
	items := make(chan *shepard.Item, 32)

	var err error
	go func() {
		err = l.sheepmq.GetItems(info, items)
		close(items)
	}()

	for item := range items {
		err := stream.Send(item)
		if err != nil {
			return err
		}
	}

	return err
}

// DelItem from sheepmq's queue
func (l *GServer) DelItem(ctx context.Context, info *shepard.DelInfo) (*shepard.Response, error) {
	return l.sheepmq.DelItem(info)
}

// ErrItem from sheepmq's queue
func (l *GServer) ErrItem(ctx context.Context, info *shepard.ErrInfo) (*shepard.Response, error) {
	return l.sheepmq.ErrItem(info)
}

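// Illustrative client sketch, not part of this change set. It assumes the
// generated service is named "Sheepmq" (as the stream types above suggest), so
// that protoc-gen-go emits a NewSheepmqClient constructor and a server-stream
// GetItem method; the address, queue name, and package name are placeholders.
package sheepmq_example

import (
	"io"

	context "golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/tblyler/sheepmq/shepard"
)

func pullItems(handle func(*shepard.Item)) error {
	conn, err := grpc.Dial("127.0.0.1:7500", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := shepard.NewSheepmqClient(conn)
	stream, err := client.GetItem(context.Background(), &shepard.GetInfo{Queue: "example", Count: 10})
	if err != nil {
		return err
	}

	for {
		item, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		handle(item)
	}
}
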
@@ -0,0 +1,38 @@
package sheepmq

import (
	"github.com/tblyler/sheepmq/queue"
	"github.com/tblyler/sheepmq/shepard"
)

// SheepMQ encapsulates the sheepmq queue logic
type SheepMQ struct {
	queues map[string]queue.Queue
}

// NewSheepMQ creates a new sheepmq instance with the given configuration
func NewSheepMQ() (*SheepMQ, error) {
	return &SheepMQ{
		queues: make(map[string]queue.Queue),
	}, nil
}

// AddItem to the sheepmq queue (not yet implemented)
func (l *SheepMQ) AddItem(item *shepard.Item) (*shepard.Response, error) {
	return nil, nil
}

// GetItems from sheepmq's queue (not yet implemented)
func (l *SheepMQ) GetItems(info *shepard.GetInfo, items chan<- *shepard.Item) error {
	return nil
}

// DelItem from sheepmq's queue (not yet implemented)
func (l *SheepMQ) DelItem(info *shepard.DelInfo) (*shepard.Response, error) {
	return nil, nil
}

// ErrItem from sheepmq's queue (not yet implemented)
func (l *SheepMQ) ErrItem(info *shepard.ErrInfo) (*shepard.Response, error) {
	return nil, nil
}

@@ -0,0 +1,607 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: shepard.proto

/*
Package shepard is a generated protocol buffer package.

It is generated from these files:
	shepard.proto

It has these top-level messages:
	Response
	Item
	GetInfo
	TimeLease
	PidLease
	HeartbeatLease
	DelInfo
	ErrInfo
*/
package shepard

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

import (
	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Response struct {
	// whether or not the operation was successful
	Success bool `protobuf:"varint,1,opt,name=success" json:"success,omitempty"`
	// the amount of items affected
	Count uint64 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	// error message on failure
	Msg string `protobuf:"bytes,3,opt,name=msg" json:"msg,omitempty"`
}

func (m *Response) Reset()                    { *m = Response{} }
func (m *Response) String() string            { return proto.CompactTextString(m) }
func (*Response) ProtoMessage()               {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

func (m *Response) GetSuccess() bool {
	if m != nil {
		return m.Success
	}
	return false
}

func (m *Response) GetCount() uint64 {
	if m != nil {
		return m.Count
	}
	return 0
}

func (m *Response) GetMsg() string {
	if m != nil {
		return m.Msg
	}
	return ""
}

type Item struct {
	// the id for this item
	Id uint64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
	// the arbitrary data for this item
	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	// the queue for this item
	Queue string `protobuf:"bytes,3,opt,name=queue" json:"queue,omitempty"`
	// error queue to cycle items to
	ErrorQueue string `protobuf:"bytes,4,opt,name=errorQueue" json:"errorQueue,omitempty"`
	// error queue TTL in nanoseconds
	ErrorTTL int64 `protobuf:"fixed64,5,opt,name=errorTTL" json:"errorTTL,omitempty"`
	// the Unix time (in seconds) in which this item was created
	Ctime int64 `protobuf:"fixed64,6,opt,name=ctime" json:"ctime,omitempty"`
	// the Unix time (in seconds) in which this item was errored last
	Etime int64 `protobuf:"fixed64,7,opt,name=etime" json:"etime,omitempty"`
	// the amount of times this item was errored
	Ecount uint32 `protobuf:"varint,8,opt,name=ecount" json:"ecount,omitempty"`
	// arbitrary statistical sizes to record for this item
	Stats map[string]int64 `protobuf:"bytes,9,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
}

func (m *Item) Reset()                    { *m = Item{} }
func (m *Item) String() string            { return proto.CompactTextString(m) }
func (*Item) ProtoMessage()               {}
func (*Item) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

func (m *Item) GetId() uint64 {
	if m != nil {
		return m.Id
	}
	return 0
}

func (m *Item) GetData() []byte {
	if m != nil {
		return m.Data
	}
	return nil
}

func (m *Item) GetQueue() string {
	if m != nil {
		return m.Queue
	}
	return ""
}

func (m *Item) GetErrorQueue() string {
	if m != nil {
		return m.ErrorQueue
	}
	return ""
}

func (m *Item) GetErrorTTL() int64 {
	if m != nil {
		return m.ErrorTTL
	}
	return 0
}

func (m *Item) GetCtime() int64 {
	if m != nil {
		return m.Ctime
	}
	return 0
}

func (m *Item) GetEtime() int64 {
	if m != nil {
		return m.Etime
	}
	return 0
}

func (m *Item) GetEcount() uint32 {
	if m != nil {
		return m.Ecount
	}
	return 0
}

func (m *Item) GetStats() map[string]int64 {
	if m != nil {
		return m.Stats
	}
	return nil
}

type GetInfo struct {
	Queue string `protobuf:"bytes,1,opt,name=queue" json:"queue,omitempty"`
	// the amount of items to try and pull
	Count        uint64          `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	TimeoutLease *TimeLease      `protobuf:"bytes,3,opt,name=timeoutLease" json:"timeoutLease,omitempty"`
	PidLease     *PidLease       `protobuf:"bytes,4,opt,name=pidLease" json:"pidLease,omitempty"`
	HeartLease   *HeartbeatLease `protobuf:"bytes,5,opt,name=heartLease" json:"heartLease,omitempty"`
}

func (m *GetInfo) Reset()                    { *m = GetInfo{} }
func (m *GetInfo) String() string            { return proto.CompactTextString(m) }
func (*GetInfo) ProtoMessage()               {}
func (*GetInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

func (m *GetInfo) GetQueue() string {
	if m != nil {
		return m.Queue
	}
	return ""
}

func (m *GetInfo) GetCount() uint64 {
	if m != nil {
		return m.Count
	}
	return 0
}

func (m *GetInfo) GetTimeoutLease() *TimeLease {
	if m != nil {
		return m.TimeoutLease
	}
	return nil
}

func (m *GetInfo) GetPidLease() *PidLease {
	if m != nil {
		return m.PidLease
	}
	return nil
}

func (m *GetInfo) GetHeartLease() *HeartbeatLease {
	if m != nil {
		return m.HeartLease
	}
	return nil
}

type TimeLease struct {
	// TTL in nanoseconds to hold the lease
	Ttl int64 `protobuf:"fixed64,1,opt,name=ttl" json:"ttl,omitempty"`
}

func (m *TimeLease) Reset()                    { *m = TimeLease{} }
func (m *TimeLease) String() string            { return proto.CompactTextString(m) }
func (*TimeLease) ProtoMessage()               {}
func (*TimeLease) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

func (m *TimeLease) GetTtl() int64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}

type PidLease struct {
	Pid uint32 `protobuf:"varint,1,opt,name=pid" json:"pid,omitempty"`
}

func (m *PidLease) Reset()                    { *m = PidLease{} }
func (m *PidLease) String() string            { return proto.CompactTextString(m) }
func (*PidLease) ProtoMessage()               {}
func (*PidLease) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }

func (m *PidLease) GetPid() uint32 {
	if m != nil {
		return m.Pid
	}
	return 0
}

type HeartbeatLease struct {
	Id  string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
	Ttl int64  `protobuf:"fixed64,2,opt,name=ttl" json:"ttl,omitempty"`
}

func (m *HeartbeatLease) Reset()                    { *m = HeartbeatLease{} }
func (m *HeartbeatLease) String() string            { return proto.CompactTextString(m) }
func (*HeartbeatLease) ProtoMessage()               {}
func (*HeartbeatLease) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }

func (m *HeartbeatLease) GetId() string {
	if m != nil {
		return m.Id
	}
	return ""
}

func (m *HeartbeatLease) GetTtl() int64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}

type DelInfo struct {
	Queue string   `protobuf:"bytes,1,opt,name=queue" json:"queue,omitempty"`
	Ids   []uint64 `protobuf:"varint,2,rep,packed,name=ids" json:"ids,omitempty"`
}

func (m *DelInfo) Reset()                    { *m = DelInfo{} }
func (m *DelInfo) String() string            { return proto.CompactTextString(m) }
func (*DelInfo) ProtoMessage()               {}
func (*DelInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }

func (m *DelInfo) GetQueue() string {
	if m != nil {
		return m.Queue
	}
	return ""
}

func (m *DelInfo) GetIds() []uint64 {
	if m != nil {
		return m.Ids
	}
	return nil
}

type ErrInfo struct {
	Queue string   `protobuf:"bytes,1,opt,name=queue" json:"queue,omitempty"`
	Ids   []uint64 `protobuf:"varint,2,rep,packed,name=ids" json:"ids,omitempty"`
}

func (m *ErrInfo) Reset()                    { *m = ErrInfo{} }
func (m *ErrInfo) String() string            { return proto.CompactTextString(m) }
func (*ErrInfo) ProtoMessage()               {}
func (*ErrInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }