// Demonstration of a simple amortized-constant-time unlimited-size
// queue. A ring buffer would be more efficient, but this is simpler.
// When the head becomes empty, we move the tail elements to it in
// linear time; this is amortized constant time because the append()
// operation is amortized constant time, and it runs exactly once for
// each element that is first Pushed and later Shifted. We keep using
// slices of the same underlying arrays when possible so that the
// queue will not keep reallocating if its maximum size does not
// increase.
//
// It would probably also be more efficient in most cases to make this
// generic, but I don't know how Golang generics work.
//
// A ring buffer would be faster in the average case because it
// doesn't waste time copying elements in the steady state, and in the
// worst case because it doesn't pause to copy all the elements when
// it wraps around. A simple unlimited-capacity ring buffer also has
// to pause to reallocate when it gets full. But, for example, a
// circularly-linked list of ring-buffer blocks could bound those
// pauses. If you want bounded pauses, Golang probably isn't the
// language for you.
//
// Also, if you want a limited-capacity queue in Golang, maybe use a
// channel.
package main

import (
	"errors"
	"fmt"
)

// ErrEmpty is returned by Shift when the queue holds no elements.
var ErrEmpty = errors.New("queue empty")

// Queue is an unbounded FIFO queue with amortized O(1) Push and Shift.
// The zero value is an empty queue, ready to use.
type Queue struct {
	// Elements are shifted off the end of head; new elements are
	// appended to tail. When head runs dry, tail is reversed into it.
	// Both slices keep their backing arrays across operations so the
	// queue stops allocating once it reaches its maximum size.
	head, tail []interface{}
}

// Push appends elem to the back of the queue.
func (q *Queue) Push(elem interface{}) {
	q.tail = append(q.tail, elem)
}

// Shift removes and returns the element at the front of the queue.
// It returns ErrEmpty if the queue is empty.
func (q *Queue) Shift() (interface{}, error) {
	if len(q.head) == 0 {
		if len(q.tail) == 0 {
			return nil, ErrEmpty
		}
		// Move tail into head in reverse order, so the oldest element
		// ends up last in head (where Shift pops from).
		n := len(q.tail) - 1
		for i := range q.tail {
			q.head = append(q.head, q.tail[n-i])
		}
		// Nil out the moved slots before truncating: otherwise the
		// tail's backing array would keep the elements reachable (and
		// un-collectable) even after they are later shifted off head.
		for i := range q.tail {
			q.tail[i] = nil
		}
		q.tail = q.tail[:0] // keep the backing array for reuse
	}
	hl := len(q.head) - 1
	elem := q.head[hl]
	// Drop the reference so the element can be garbage-collected once
	// the caller is done with it; head's backing array lives on.
	q.head[hl] = nil
	q.head = q.head[:hl]
	return elem, nil
}

func main() {
	q := Queue{}
	fmt.Println(q.Shift())
	q.Push("harder")
	q.Push("better")
	fmt.Println(q.Shift())
	q.Push("faster")
	q.Push("stronger")
	fmt.Println(q.Shift())
	fmt.Println(q.Shift())
	fmt.Println(q.Shift())
	fmt.Println(q.Shift())
}