Решение на HTTP сваляч от Даниел Тасков

Обратно към всички решения

Към профила на Даниел Тасков

Резултати

  • 6 точки от тестове
  • 0 бонус точки
  • 6 точки общо
  • 8 успешни тест(а)
  • 5 неуспешни тест(а)

Код

package main
import "sync"
type empty struct{}
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
r := new(myRequester)
r.cache = newCache(cacheSize)
r.threshold = make(chan empty, throttleSize)
r.processing = map[string]*sync.Mutex{}
r.running = true
return r
}
type myRequester struct {
cache cache
threshold chan empty
processing map[string]*sync.Mutex
running bool
requests sync.WaitGroup
sync.Mutex
}
func (mr *myRequester) AddRequest(request Request) {
mr.Lock()
running := mr.running
mr.Unlock()
if running {
id := request.ID()
mr.Lock()
if _, ok := mr.processing[id]; !ok {
mr.processing[id] = new(sync.Mutex)
}
mr.processing[id].Lock()
mr.Unlock()
mr.requests.Add(1)
defer mr.requests.Done()
if v, ok := mr.cache.get(id); ok {
request.SetResult(v.val(), v.err())
} else {
mr.threshold <- empty{}
val, err := request.Run()
<-mr.threshold
if request.Cacheable() {
mr.cache.cache(id, newCacheable(val, err))
}
}
mr.Lock()
mr.processing[id].Unlock()
mr.Unlock()
}
}
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
mr.requests.Wait()
}
type cache interface {
get(id string) (cacheable, bool)
cache(id string, c cacheable)
}
func newCache(cacheSize int) cache {
buf := new(buffer)
buf.indexes = map[string]int{}
buf.ids = map[int]string{}
buf.buffered = make([]bufferable, cacheSize, cacheSize)
buf.last = 0
buf.size = cacheSize
return buf
}
type buffer struct {
indexes map[string]int
ids map[int]string
buffered []bufferable
last int
size int
sync.Mutex
}
func (b *buffer) get(id string) (cacheable, bool) {
b.Lock()
defer b.Unlock()
if i, ok := b.indexes[id]; ok {
val := b.buffered[i]
return &val, true
}
return nil, false
}
func (b *buffer) cache(id string, c cacheable) {
b.Lock()
defer b.Unlock()
oldid := b.ids[b.last]
delete(b.ids, b.last)
delete(b.indexes, oldid)
b.ids[b.last] = id
b.indexes[id] = b.last
b.buffered[b.last] = bufferable{c.val(), c.err()}
b.last++
b.last %= b.size
}
type cacheable interface {
val() interface{}
err() error
}
func newCacheable(val interface{}, err error) cacheable {
return &bufferable{val, err}
}
type bufferable struct {
value interface{}
error error
}
func (b *bufferable) val() interface{} {
return b.value
}
func (b *bufferable) err() error {
return b.error
}

Лог от изпълнението

PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.003s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.003s
panic: test timed out after 1s

goroutine 26 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e4f58, 0x671e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820062510)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1lm7ec/_test/_testmain.go:78 +0x116

goroutine 23 [chan receive]:
_/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast(0xc8200a2090)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:395 +0x862
testing.tRunner(0xc8200a2090, 0x671ee0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 24 [semacquire]:
sync.runtime_Semacquire(0xc82005e778)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc82005e774)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc82005e740, 0x7fd304aee580, 0xc820066510)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast
	/tmp/d20160101-5892-1lm7ec/solution_test.go:377 +0x59f

goroutine 25 [semacquire]:
sync.runtime_Semacquire(0xc8200625f4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200625f0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc82005e740, 0x7fd304aee580, 0xc820066570)
	/tmp/d20160101-5892-1lm7ec/solution.go:49 +0x1f0
created by _/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast
	/tmp/d20160101-5892-1lm7ec/solution_test.go:388 +0x772
exit status 2
FAIL	_/tmp/d20160101-5892-1lm7ec	1.006s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.003s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.003s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.043s
panic: test timed out after 1s

goroutine 10 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e4f58, 0x671e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1lm7ec/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-1lm7ec.TestCacheSize(0xc82008e000)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:293 +0xb5d
testing.tRunner(0xc82008e000, 0x671eb0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 17 [semacquire]:
sync.runtime_Semacquire(0xc820010784)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc820010780)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc820016700, 0x7f4b9d9d15a8, 0xc820010730)
	/tmp/d20160101-5892-1lm7ec/solution.go:49 +0x1f0
created by _/tmp/d20160101-5892-1lm7ec.TestCacheSize
	/tmp/d20160101-5892-1lm7ec/solution_test.go:292 +0xb3a

goroutine 9 [semacquire]:
sync.runtime_Semacquire(0xc820016738)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc820016734)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc820016700, 0x7f4b9d9d15a8, 0xc820010730)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestCacheSize
	/tmp/d20160101-5892-1lm7ec/solution_test.go:285 +0x9e9
exit status 2
FAIL	_/tmp/d20160101-5892-1lm7ec	1.005s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.043s
panic: test timed out after 1s

goroutine 23 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e4f58, 0x671e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820062510)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1lm7ec/_test/_testmain.go:78 +0x116

goroutine 20 [chan receive]:
_/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast(0xc8200a4000)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:395 +0x862
testing.tRunner(0xc8200a4000, 0x671ee0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 21 [semacquire]:
sync.runtime_Semacquire(0xc82005e638)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc82005e634)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc82005e600, 0x7efd6f358580, 0xc820066420)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast
	/tmp/d20160101-5892-1lm7ec/solution_test.go:377 +0x59f

goroutine 22 [semacquire]:
sync.runtime_Semacquire(0xc8200625a4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200625a0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc82005e600, 0x7efd6f358580, 0xc820066480)
	/tmp/d20160101-5892-1lm7ec/solution.go:49 +0x1f0
created by _/tmp/d20160101-5892-1lm7ec.TestNonCacheableRequestsFast
	/tmp/d20160101-5892-1lm7ec/solution_test.go:388 +0x772
exit status 2
FAIL	_/tmp/d20160101-5892-1lm7ec	1.006s
panic: test timed out after 1s

goroutine 11 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e4f58, 0x671e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1lm7ec/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-1lm7ec.TestStopWithQueue(0xc82008e000)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:454 +0xad6
testing.tRunner(0xc82008e000, 0x671ef8)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 7 [semacquire]:
sync.runtime_Semacquire(0xc8200166b8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166b4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc820016680, 0x7faee1d015a8, 0xc82000a510)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueue
	/tmp/d20160101-5892-1lm7ec/solution_test.go:439 +0x890

goroutine 8 [semacquire]:
sync.runtime_Semacquire(0xc8200106f4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200106f0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc820016680, 0x7faee1d015a8, 0xc82000a540)
	/tmp/d20160101-5892-1lm7ec/solution.go:49 +0x1f0
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueue
	/tmp/d20160101-5892-1lm7ec/solution_test.go:441 +0x928

goroutine 17 [semacquire]:
sync.runtime_Semacquire(0xc8200166b8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166b4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).Stop(0xc820016680)
	/tmp/d20160101-5892-1lm7ec/solution.go:72 +0x2d
_/tmp/d20160101-5892-1lm7ec.TestStopWithQueue.func3(0xc82001e3c0, 0x7faee1d01578, 0xc820016680, 0xc82001e420)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:446 +0x35
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueue
	/tmp/d20160101-5892-1lm7ec/solution_test.go:448 +0x9f0

goroutine 10 [semacquire]:
sync.runtime_Semacquire(0xc8200166b8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166b4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc820016680, 0x7faee1d015a8, 0xc82000a5a0)
	/tmp/d20160101-5892-1lm7ec/solution.go:38 +0x43
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueue
	/tmp/d20160101-5892-1lm7ec/solution_test.go:451 +0xa95
exit status 2
FAIL	_/tmp/d20160101-5892-1lm7ec	1.006s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.033s
PASS
ok  	_/tmp/d20160101-5892-1lm7ec	0.053s
panic: test timed out after 1s

goroutine 10 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e4f58, 0x671e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1lm7ec/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum(0xc82008e000)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:633 +0x105c
testing.tRunner(0xc82008e000, 0x671f40)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 7 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a510)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:612 +0xca0

goroutine 8 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a5a0)
	/tmp/d20160101-5892-1lm7ec/solution.go:65 +0x3a1
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:613 +0xd18

goroutine 17 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a540)
	/tmp/d20160101-5892-1lm7ec/solution.go:38 +0x43
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:616 +0xdd9

goroutine 18 [semacquire]:
sync.runtime_Semacquire(0xc8200106f4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200106f0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a5d0)
	/tmp/d20160101-5892-1lm7ec/solution.go:49 +0x1f0
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:617 +0xe4e

goroutine 9 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).Stop(0xc8200166c0)
	/tmp/d20160101-5892-1lm7ec/solution.go:72 +0x2d
_/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum.func4(0xc82001e5a0, 0x7f0df393f578, 0xc8200166c0, 0xc82001e600)
	/tmp/d20160101-5892-1lm7ec/solution_test.go:621 +0x35
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:623 +0xea1

goroutine 19 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a600)
	/tmp/d20160101-5892-1lm7ec/solution.go:38 +0x43
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:626 +0xf46

goroutine 20 [semacquire]:
sync.runtime_Semacquire(0xc8200166f8)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200166f4)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-1lm7ec.(*myRequester).AddRequest(0xc8200166c0, 0x7f0df393f5a8, 0xc82000a570)
	/tmp/d20160101-5892-1lm7ec/solution.go:38 +0x43
created by _/tmp/d20160101-5892-1lm7ec.TestStopWithQueueFromForum
	/tmp/d20160101-5892-1lm7ec/solution_test.go:627 +0xfbe
exit status 2
FAIL	_/tmp/d20160101-5892-1lm7ec	1.006s

История (3 версии и 5 коментара)

Даниел обнови решението на 25.12.2015 17:24 (преди над 2 години)

+package main
+
+import "sync"
+
+type Request interface {
+ ID() string
+ Run() (result interface{}, err error)
+ Cacheable() bool
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ AddRequest(request Request)
+ Stop()
+}
+
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ r := new(myRequester)
+ r.cache = newCache(cacheSize)
+ r.threshold = make(semaphore, throttleSize)
+ r.running = true
+ return r
+}
+
+type empty struct{}
+type semaphore chan empty
+
+type myRequester struct {
+ cache cache
+ threshold semaphore
+ running bool
+ sync.Mutex
+}
+
+func (mr *myRequester) AddRequest(request Request) {
+ go func() {
+ mr.threshold <- empty{}
+ mr.Lock()
+ running := mr.running
+ mr.Unlock()
+ if running {
+ id := request.ID()
+ cached := mr.cache.contains(id)
+ if cached {
+ request.SetResult(mr.cache.value(id).val(), mr.cache.value(id).err())
+ } else if val, err := request.Run(); request.Cacheable() {
+ mr.cache.cache(id, &bufferable{val, err})
+ }
+ }
+ <-mr.threshold
+ }()
+}
+
+func (mr *myRequester) Stop() {
+ mr.Lock()
+ mr.running = false
+ mr.Unlock()
+}
+
+type cache interface {
+ contains(id string) bool
+ value(id string) cacheable
+ cache(id string, c cacheable)
+}
+
+func newCache(cacheSize int) cache {
+ buf := new(buffer)
+ buf.indexes = map[string]int{}
+ buf.ids = map[int]string{}
+ buf.buffered = make([]bufferable, cacheSize, cacheSize)
+ buf.last = 0
+ buf.size = cacheSize
+ return buf
+}
+
+type buffer struct {
+ indexes map[string]int
+ ids map[int]string
+ buffered []bufferable
+ last int
+ size int
+ sync.Mutex
+}
+
+func (b *buffer) contains(id string) bool {
+ b.Lock()
+ defer b.Unlock()
+ _, ok := b.indexes[id]
+ return ok
+}
+
+func (b *buffer) value(id string) cacheable {
+ b.Lock()
+ defer b.Unlock()
+ val := b.buffered[b.indexes[id]]
+ return &val
+}
+
+func (b *buffer) cache(id string, c cacheable) {
+ b.Lock()
+ defer b.Unlock()
+ oldid := b.ids[b.last]
+ delete(b.ids, b.last)
+ delete(b.indexes, oldid)
+
+ b.ids[b.last] = id
+ b.indexes[id] = b.last
+ b.buffered[b.last] = bufferable{c.val(), c.err()}
+ b.last++
+ b.last %= b.size
+}
+
+type cacheable interface {
+ val() interface{}
+ err() error
+}
+
+type bufferable struct {
+ value interface{}
+ error error
+}
+
+func (b *bufferable) val() interface{} {
+ return b.value
+}
+
+func (b *bufferable) err() error {
+ return b.error
+}

Весела Коледа и Благодаря за коледния Подарък, но:

  1. Не хващаш случая в който се подават две заявки една след друга които са еднакви и към момента на подаване на втората първата още не е свършила.
  2. throtleSize-а е за изпълняваните заявки - тези на които им се вика Run.
  3. chan struct {} е толкова познато на всички go програмисти че тези типове които си направил са напълно ненужни.

Иначе ми харесва как си имаш свой си интерфейс и си го имплементираш(cache/buffer).

Весели Празници и приятно писане :smile:

Даниел обнови решението на 26.12.2015 21:24 (преди над 2 години)

package main
import "sync"
+type empty struct{}
+
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
r := new(myRequester)
r.cache = newCache(cacheSize)
- r.threshold = make(semaphore, throttleSize)
+ r.threshold = make(chan empty, throttleSize)
r.running = true
+ r.processing = map[string]empty{}
return r
}
-type empty struct{}
-type semaphore chan empty
-
type myRequester struct {
- cache cache
- threshold semaphore
- running bool
+ cache cache
+ threshold chan empty
+ running bool
+ processing map[string]empty
sync.Mutex
}
func (mr *myRequester) AddRequest(request Request) {
- go func() {
- mr.threshold <- empty{}
+ mr.Lock()
+ running := mr.running
+ mr.Unlock()
+
+ if running {
+ id := request.ID()
+ mr.waitToProcessRequest(id)
+
+ if v, ok := mr.cache.get(id); ok {
+ request.SetResult(v.val(), v.err())
+ } else {
+ mr.threshold <- empty{}
+ val, err := request.Run()
+ <-mr.threshold
+ if request.Cacheable() {
+ mr.cache.cache(id, newCacheable(val, err))
+ }
+ }
mr.Lock()
- running := mr.running
+ delete(mr.processing, id)
mr.Unlock()
- if running {
- id := request.ID()
- cached := mr.cache.contains(id)
- if cached {
- request.SetResult(mr.cache.value(id).val(), mr.cache.value(id).err())
- } else if val, err := request.Run(); request.Cacheable() {
- mr.cache.cache(id, &bufferable{val, err})
- }
+ }
+}
+
+func (mr *myRequester) waitToProcessRequest(id string) {
+ for ok := true; ok; {
+ mr.Lock()
+ if _, ok = mr.processing[id]; !ok {
+ mr.processing[id] = empty{}
}
- <-mr.threshold
- }()
+ mr.Unlock()
+ }
}
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
}
type cache interface {
- contains(id string) bool
- value(id string) cacheable
+ get(id string) (cacheable, bool)
cache(id string, c cacheable)
}
func newCache(cacheSize int) cache {
buf := new(buffer)
buf.indexes = map[string]int{}
buf.ids = map[int]string{}
buf.buffered = make([]bufferable, cacheSize, cacheSize)
buf.last = 0
buf.size = cacheSize
+
return buf
}
type buffer struct {
indexes map[string]int
ids map[int]string
buffered []bufferable
last int
size int
sync.Mutex
}
-func (b *buffer) contains(id string) bool {
+func (b *buffer) get(id string) (cacheable, bool) {
b.Lock()
defer b.Unlock()
- _, ok := b.indexes[id]
- return ok
-}
-func (b *buffer) value(id string) cacheable {
- b.Lock()
- defer b.Unlock()
- val := b.buffered[b.indexes[id]]
- return &val
+ if i, ok := b.indexes[id]; ok {
+ val := b.buffered[i]
+ return &val, true
+ }
+ return nil, false
}
func (b *buffer) cache(id string, c cacheable) {
b.Lock()
defer b.Unlock()
+
oldid := b.ids[b.last]
delete(b.ids, b.last)
delete(b.indexes, oldid)
b.ids[b.last] = id
b.indexes[id] = b.last
b.buffered[b.last] = bufferable{c.val(), c.err()}
b.last++
b.last %= b.size
}
type cacheable interface {
val() interface{}
err() error
+}
+
+func newCacheable(val interface{}, err error) cacheable {
+ return &bufferable{val, err}
}
type bufferable struct {
value interface{}
error error
}
func (b *bufferable) val() interface{} {
return b.value
}
func (b *bufferable) err() error {
return b.error
}

Весела Коледа! Иначее страхотен коледен подарък, няма що -.-

Оправих 1. и 2., аа за 3. махнах type semaphore chan empty, но type empty struct{} го оставих, защото са ми много грозни тези struct{}{} навсякъде, а и доколкото виждам е доста използвано и из Go обществото, или ? : )

Добър вечер,

На бързо и градус:

  1. какво се случва във waitToProcessRequest ако се получат два request-а за едно и също ID и request-а е много бавен(минути)?
  2. Изглежда ми че throttle-ването ти не е на изпълнение а на кеширане
  3. Прочети условието на Stop и първия ми коментар във форума, този в отговор на твоя въпрос.

Лека вечер, приятно кодене и Весела Нова Година.

Даниел обнови решението на 30.12.2015 01:26 (преди над 2 години)

package main
import "sync"
type empty struct{}
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
r := new(myRequester)
r.cache = newCache(cacheSize)
r.threshold = make(chan empty, throttleSize)
+ r.processing = map[string]*sync.Mutex{}
r.running = true
- r.processing = map[string]empty{}
return r
}
type myRequester struct {
cache cache
threshold chan empty
+ processing map[string]*sync.Mutex
running bool
- processing map[string]empty
+ requests sync.WaitGroup
sync.Mutex
}
func (mr *myRequester) AddRequest(request Request) {
mr.Lock()
running := mr.running
mr.Unlock()
if running {
id := request.ID()
- mr.waitToProcessRequest(id)
+ mr.Lock()
+ if _, ok := mr.processing[id]; !ok {
+ mr.processing[id] = new(sync.Mutex)
+ }
+ mr.processing[id].Lock()
+ mr.Unlock()
+
+ mr.requests.Add(1)
+ defer mr.requests.Done()
+
if v, ok := mr.cache.get(id); ok {
request.SetResult(v.val(), v.err())
} else {
mr.threshold <- empty{}
val, err := request.Run()
<-mr.threshold
if request.Cacheable() {
mr.cache.cache(id, newCacheable(val, err))
}
}
mr.Lock()
- delete(mr.processing, id)
+ mr.processing[id].Unlock()
mr.Unlock()
}
}
-func (mr *myRequester) waitToProcessRequest(id string) {
- for ok := true; ok; {
- mr.Lock()
- if _, ok = mr.processing[id]; !ok {
- mr.processing[id] = empty{}
- }
- mr.Unlock()
- }
-}
-
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
+ mr.requests.Wait()
}
type cache interface {
get(id string) (cacheable, bool)
cache(id string, c cacheable)
}
func newCache(cacheSize int) cache {
buf := new(buffer)
buf.indexes = map[string]int{}
buf.ids = map[int]string{}
buf.buffered = make([]bufferable, cacheSize, cacheSize)
buf.last = 0
buf.size = cacheSize
return buf
}
type buffer struct {
indexes map[string]int
ids map[int]string
buffered []bufferable
last int
size int
sync.Mutex
}
func (b *buffer) get(id string) (cacheable, bool) {
b.Lock()
defer b.Unlock()
if i, ok := b.indexes[id]; ok {
val := b.buffered[i]
return &val, true
}
return nil, false
}
func (b *buffer) cache(id string, c cacheable) {
b.Lock()
defer b.Unlock()
oldid := b.ids[b.last]
delete(b.ids, b.last)
delete(b.indexes, oldid)
b.ids[b.last] = id
b.indexes[id] = b.last
b.buffered[b.last] = bufferable{c.val(), c.err()}
b.last++
b.last %= b.size
}
type cacheable interface {
val() interface{}
err() error
}
func newCacheable(val interface{}, err error) cacheable {
return &bufferable{val, err}
}
type bufferable struct {
value interface{}
error error
}
func (b *bufferable) val() interface{} {
return b.value
}
func (b *bufferable) err() error {
return b.error
}