Решение на HTTP сваляч от Добромир Иванов

Обратно към всички решения

Към профила на Добромир Иванов

Резултати

  • 6 точки от тестове
  • 0 бонус точки
  • 6 точки общо
  • 8 успешни тест(а)
  • 5 неуспешни тест(а)

Код

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type equalityLock struct {
locks map[string]*sync.Mutex
}
func newEqualityLock() equalityLock {
return equalityLock{make(map[string]*sync.Mutex)}
}
func (lock equalityLock) accuire(id string) *sync.Mutex {
value, ok := lock.locks[id]
if ok {
return value
}
lock.locks[id] = new(sync.Mutex)
return lock.locks[id]
}
func (lock equalityLock) release(id string) {
delete(lock.locks, id)
}
type cacheEntry struct {
value interface{}
err error
}
type cache struct {
cacheSize int
cacheOrder []string
cache map[string]cacheEntry
}
func newCache(cacheSize int) cache {
cache := cache{}
cache.cacheSize = cacheSize
cache.cacheOrder = make([]string, 0, cacheSize)
cache.cache = make(map[string]cacheEntry)
return cache
}
func (cacheRepo cache) isCached(id string) bool {
_, ok := cacheRepo.cache[id]
return ok
}
func (cacheRepo cache) get(id string) cacheEntry {
return cacheRepo.cache[id]
}
func (cacheRepo cache) put(value cacheEntry, id string) {
if len(cacheRepo.cacheOrder) == cacheRepo.cacheSize {
delete(cacheRepo.cache, cacheRepo.cacheOrder[0])
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder[1:], id)
} else {
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder, id)
}
cacheRepo.cache[id] = value
}
type cacheRunner struct {
active bool
cacher cache
throttle chan struct{}
requestLocks equalityLock
lock *sync.Mutex
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return cacheRunner{
active: true,
cacher: newCache(cacheSize),
throttle: make(chan struct{}, throttleSize),
requestLocks: newEqualityLock(),
lock: new(sync.Mutex),
}
}
func (runner cacheRunner) AddRequest(request Request) {
runner.throttle <- struct{}{}
runner.lock.Lock()
if runner.active == false {
<-runner.throttle
runner.lock.Unlock()
return
}
if runner.cacher.isCached(request.ID()) {
entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
<-runner.throttle
runner.lock.Unlock()
return
}
requestLock := runner.requestLocks.accuire(request.ID())
runner.lock.Unlock()
requestLock.Lock()
runner.lock.Lock()
if runner.cacher.isCached(request.ID()) {
entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
<-runner.throttle
requestLock.Unlock()
runner.lock.Unlock()
return
}
result, err := request.Run()
if request.Cacheable() {
runner.cacher.put(cacheEntry{result, err}, request.ID())
}
<-runner.throttle
runner.lock.Unlock()
requestLock.Unlock()
}
func (runner cacheRunner) Stop() {
runner.lock.Lock()
runner.active = false
runner.lock.Unlock()
}

Лог от изпълнението

PASS
ok  	_/tmp/d20160101-5892-are7ho	0.003s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.003s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.103s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.003s
panic: test timed out after 1s

goroutine 17 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e5998, 0x673e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-are7ho/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-are7ho.TestRequestsAreRunAsyncly(0xc820090000)
	/tmp/d20160101-5892-are7ho/solution_test.go:190 +0x83f
testing.tRunner(0xc820090000, 0x673e80)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 7 [chan receive]:
_/tmp/d20160101-5892-are7ho.TestRequestsAreRunAsyncly.func1(0x0, 0x0, 0x0, 0x0)
	/tmp/d20160101-5892-are7ho/solution_test.go:171 +0xb1
_/tmp/d20160101-5892-are7ho.(*request).Run(0xc82000a4e0, 0x0, 0x0, 0x0, 0x0)
	/tmp/d20160101-5892-are7ho/solution_test.go:37 +0xa5
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0xa, 0xc820076140, 0x0, 0xa, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106e0, 0x7f5a689a9578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:133 +0x6a0
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a140, 0x7f5a689a9578, 0xc82000a4e0)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestRequestsAreRunAsyncly
	/tmp/d20160101-5892-are7ho/solution_test.go:187 +0x784

goroutine 8 [semacquire]:
sync.runtime_Semacquire(0xc8200106e4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200106e0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0xa, 0xc820076140, 0x0, 0xa, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106e0, 0x7f5a689a9578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:100 +0x5c
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a140, 0x7f5a689a9578, 0xc82000a540)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestRequestsAreRunAsyncly
	/tmp/d20160101-5892-are7ho/solution_test.go:189 +0x81c
exit status 2
FAIL	_/tmp/d20160101-5892-are7ho	1.005s
--- FAIL: TestNoRunsAfterStop (0.04s)
	solution_test.go:57: request after stop was ran
FAIL
exit status 1
FAIL	_/tmp/d20160101-5892-are7ho	0.043s
panic: test timed out after 1s

goroutine 27 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e5998, 0x673e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820062510)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-are7ho/_test/_testmain.go:78 +0x116

goroutine 20 [chan send]:
_/tmp/d20160101-5892-are7ho.TestCacheSize(0xc8200a0000)
	/tmp/d20160101-5892-are7ho/solution_test.go:303 +0xce5
testing.tRunner(0xc8200a0000, 0x673eb0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d
exit status 2
FAIL	_/tmp/d20160101-5892-are7ho	1.005s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.063s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.053s
panic: test timed out after 1s

goroutine 18 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e5998, 0x673e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-are7ho/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-are7ho.TestStopWithQueue(0xc82008e000)
	/tmp/d20160101-5892-are7ho/solution_test.go:455 +0xaaf
testing.tRunner(0xc82008e000, 0x673ef8)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 8 [chan send]:
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0x1, 0xc8200106e0, 0x0, 0x1, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106f0, 0x7f94856e0578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:99 +0x4b
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a1e0, 0x7f94856e0578, 0xc82000a510)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestStopWithQueue
	/tmp/d20160101-5892-are7ho/solution_test.go:441 +0x8de

goroutine 10 [chan send]:
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0x1, 0xc8200106e0, 0x0, 0x1, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106f0, 0x7f94856e0578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:99 +0x4b
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a1e0, 0x7f94856e0578, 0xc82000a570)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestStopWithQueue
	/tmp/d20160101-5892-are7ho/solution_test.go:451 +0xa4b
exit status 2
FAIL	_/tmp/d20160101-5892-are7ho	1.005s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.033s
PASS
ok  	_/tmp/d20160101-5892-are7ho	0.053s
panic: test timed out after 1s

goroutine 17 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e5998, 0x673e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-are7ho/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-are7ho.TestStopWithQueueFromForum(0xc82008e000)
	/tmp/d20160101-5892-are7ho/solution_test.go:614 +0xcf4
testing.tRunner(0xc82008e000, 0x673f40)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 7 [semacquire]:
sync.runtime_Semacquire(0xc8200106e4)
	/usr/local/go/src/runtime/sema.go:43 +0x26
sync.(*Mutex).Lock(0xc8200106e0)
	/usr/local/go/src/sync/mutex.go:82 +0x1c4
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0x3e8, 0xc820090000, 0x0, 0x3e8, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106e0, 0x7fe8fa074578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:100 +0x5c
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a140, 0x7fe8fa074578, 0xc82000a4e0)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestStopWithQueueFromForum
	/tmp/d20160101-5892-are7ho/solution_test.go:612 +0xc56

goroutine 8 [chan receive]:
_/tmp/d20160101-5892-are7ho.TestStopWithQueueFromForum.func3(0x0, 0x0, 0x0, 0x0)
	/tmp/d20160101-5892-are7ho/solution_test.go:584 +0x71
_/tmp/d20160101-5892-are7ho.(*request).Run(0xc82000a570, 0x0, 0x0, 0x0, 0x0)
	/tmp/d20160101-5892-are7ho/solution_test.go:37 +0xa5
_/tmp/d20160101-5892-are7ho.cacheRunner.AddRequest(0x1, 0x3e8, 0xc820090000, 0x0, 0x3e8, 0xc82000a480, 0xc82001e2a0, 0xc82000a4b0, 0xc8200106e0, 0x7fe8fa074578, ...)
	/tmp/d20160101-5892-are7ho/solution.go:133 +0x6a0
_/tmp/d20160101-5892-are7ho.(*cacheRunner).AddRequest(0xc82001a140, 0x7fe8fa074578, 0xc82000a570)
	<autogenerated>:6 +0xaa
created by _/tmp/d20160101-5892-are7ho.TestStopWithQueueFromForum
	/tmp/d20160101-5892-are7ho/solution_test.go:613 +0xcce
exit status 2
FAIL	_/tmp/d20160101-5892-are7ho	1.006s

История (3 версии и 1 коментар)

Добромир обнови решението на 31.12.2015 00:17 (преди над 2 години)

+package main
+
+import (
+ "sync"
+)
+
+type Request interface {
+ ID() string
+ Run() (result interface{}, err error)
+ Cacheable() bool
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ AddRequest(request Request)
+ Stop()
+}
+
+type equalityLock struct {
+ locks map[string]*sync.Mutex
+}
+
+func (lock equalityLock) accuire(id string) *sync.Mutex {
+ value, ok := lock.locks[id]
+ if ok {
+ return value
+ }
+ lock.locks[id] = new(sync.Mutex)
+ return lock.locks[id]
+}
+
+type cacheEntry struct {
+ value interface{}
+ err error
+}
+
+type cache struct {
+ cacheSize int
+ cacheOrder []string
+ cache map[string]cacheEntry
+}
+
+func newCache(cacheSize int) cache {
+ cache := cache{}
+
+ cache.cacheSize = cacheSize
+ cache.cacheOrder = make([]string, 0, cacheSize)
+ cache.cache = make(map[string]cacheEntry)
+
+ return cache
+}
+
+func (cacheRepo cache) isCached(id string) bool {
+ _, ok := cacheRepo.cache[id]
+ return ok
+}
+
+func (cacheRepo cache) get(id string) cacheEntry {
+ return cacheRepo.cache[id]
+}
+
+func (cacheRepo cache) put(value cacheEntry, id string) {
+ if len(cacheRepo.cacheOrder) == cacheRepo.cacheSize {
+ delete(cacheRepo.cache, cacheRepo.cacheOrder[0])
+ cacheRepo.cacheOrder = append(cacheRepo.cacheOrder[1:], id)
+ } else {
+ cacheRepo.cacheOrder = append(cacheRepo.cacheOrder, id)
+ }
+ cacheRepo.cache[id] = value
+}
+
+type requestRunner struct {
+ requestLock equalityLock
+ throttle chan struct{}
+ active bool
+ cache cache
+ sync.Mutex
+}
+
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ runner := requestRunner{}
+
+ runner.throttle = make(chan struct{}, throttleSize)
+ runner.active = true
+ runner.cache = newCache(cacheSize)
+ runner.requestLock.locks = make(map[string]*sync.Mutex)
+
+ return runner
+}
+
+func (runner requestRunner) AddRequest(request Request) {
+ runner.throttle <- struct{}{}
+ runner.Lock()
+ if runner.active == false {
+ runner.Unlock()
+ <-runner.throttle
+ return
+ }
+
+ if runner.cache.isCached(request.ID()) {
+ entry := runner.cache.get(request.ID())
+ request.SetResult(entry.value, entry.err)
+
+ <-runner.throttle
+ runner.Unlock()
+ return
+ }
+
+ lock := runner.requestLock.accuire(request.ID())
+ runner.Unlock()
+
+ lock.Lock()
+ runner.Lock()
+
+ if runner.cache.isCached(request.ID()) {
+ entry := runner.cache.get(request.ID())
+ request.SetResult(entry.value, entry.err)
+
+ lock.Unlock()
+ runner.Unlock()
+ <-runner.throttle
+ return
+ }
+
+ result, err := request.Run()
+
+ if request.Cacheable() {
+ runner.cache.put(cacheEntry{result, err}, request.ID())
+ }
+ runner.Unlock()
+ lock.Unlock()
+ <-runner.throttle
+}
+
+func (runner requestRunner) Stop() {
+ runner.Lock()
+ runner.active = false
+ runner.Unlock()
+}

Добромир обнови решението на 31.12.2015 11:26 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type equalityLock struct {
locks map[string]*sync.Mutex
}
+func newEqualityLock() equalityLock {
+ return equalityLock{make(map[string]*sync.Mutex)}
+}
+
func (lock equalityLock) accuire(id string) *sync.Mutex {
value, ok := lock.locks[id]
if ok {
return value
}
lock.locks[id] = new(sync.Mutex)
return lock.locks[id]
}
+func (lock equalityLock) release(id string) {
+ delete(lock.locks, id)
+}
+
type cacheEntry struct {
value interface{}
err error
}
type cache struct {
cacheSize int
cacheOrder []string
cache map[string]cacheEntry
}
func newCache(cacheSize int) cache {
cache := cache{}
cache.cacheSize = cacheSize
cache.cacheOrder = make([]string, 0, cacheSize)
cache.cache = make(map[string]cacheEntry)
return cache
}
func (cacheRepo cache) isCached(id string) bool {
_, ok := cacheRepo.cache[id]
return ok
}
func (cacheRepo cache) get(id string) cacheEntry {
return cacheRepo.cache[id]
}
func (cacheRepo cache) put(value cacheEntry, id string) {
if len(cacheRepo.cacheOrder) == cacheRepo.cacheSize {
delete(cacheRepo.cache, cacheRepo.cacheOrder[0])
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder[1:], id)
} else {
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder, id)
}
cacheRepo.cache[id] = value
}
-type requestRunner struct {
- requestLock equalityLock
- throttle chan struct{}
- active bool
- cache cache
- sync.Mutex
+type cacheRunner struct {
+ active bool
+ cacher cache
+ throttle chan struct{}
+ requestLocks equalityLock
+ lock *sync.Mutex
}
func NewRequester(cacheSize int, throttleSize int) Requester {
- runner := requestRunner{}
-
- runner.throttle = make(chan struct{}, throttleSize)
- runner.active = true
- runner.cache = newCache(cacheSize)
- runner.requestLock.locks = make(map[string]*sync.Mutex)
-
- return runner
+ return cacheRunner{
+ active: true,
+ cacher: newCache(cacheSize),
+ throttle: make(chan struct{}, throttleSize),
+ requestLocks: newEqualityLock(),
+ lock: new(sync.Mutex),
+ }
}
-func (runner requestRunner) AddRequest(request Request) {
+func (runner cacheRunner) AddRequest(request Request) {
runner.throttle <- struct{}{}
- runner.Lock()
+ runner.lock.Lock()
+
if runner.active == false {
- runner.Unlock()
<-runner.throttle
+ runner.lock.Unlock()
return
}
- if runner.cache.isCached(request.ID()) {
- entry := runner.cache.get(request.ID())
+ if runner.cacher.isCached(request.ID()) {
+ entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
<-runner.throttle
- runner.Unlock()
+ runner.lock.Unlock()
return
}
- lock := runner.requestLock.accuire(request.ID())
- runner.Unlock()
+ requestLock := runner.requestLocks.accuire(request.ID())
+ runner.lock.Unlock()
- lock.Lock()
- runner.Lock()
+ requestLock.Lock()
+ runner.lock.Lock()
- if runner.cache.isCached(request.ID()) {
- entry := runner.cache.get(request.ID())
+ if runner.cacher.isCached(request.ID()) {
+ entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
- lock.Unlock()
- runner.Unlock()
<-runner.throttle
+ requestLock.Unlock()
+ runner.lock.Unlock()
return
}
- result, err := request.Run()
+ //result, err := request.Run()
if request.Cacheable() {
- runner.cache.put(cacheEntry{result, err}, request.ID())
+ runner.cacher.put(cacheEntry{nil, nil}, request.ID())
}
- runner.Unlock()
- lock.Unlock()
+
<-runner.throttle
+ runner.lock.Unlock()
+ requestLock.Unlock()
}
-func (runner requestRunner) Stop() {
- runner.Lock()
+func (runner cacheRunner) Stop() {
+ runner.lock.Lock()
runner.active = false
- runner.Unlock()
+ runner.lock.Unlock()
}

Добромир обнови решението на 31.12.2015 11:29 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type equalityLock struct {
locks map[string]*sync.Mutex
}
func newEqualityLock() equalityLock {
return equalityLock{make(map[string]*sync.Mutex)}
}
func (lock equalityLock) accuire(id string) *sync.Mutex {
value, ok := lock.locks[id]
if ok {
return value
}
lock.locks[id] = new(sync.Mutex)
return lock.locks[id]
}
func (lock equalityLock) release(id string) {
delete(lock.locks, id)
}
type cacheEntry struct {
value interface{}
err error
}
type cache struct {
cacheSize int
cacheOrder []string
cache map[string]cacheEntry
}
func newCache(cacheSize int) cache {
cache := cache{}
cache.cacheSize = cacheSize
cache.cacheOrder = make([]string, 0, cacheSize)
cache.cache = make(map[string]cacheEntry)
return cache
}
func (cacheRepo cache) isCached(id string) bool {
_, ok := cacheRepo.cache[id]
return ok
}
func (cacheRepo cache) get(id string) cacheEntry {
return cacheRepo.cache[id]
}
func (cacheRepo cache) put(value cacheEntry, id string) {
if len(cacheRepo.cacheOrder) == cacheRepo.cacheSize {
delete(cacheRepo.cache, cacheRepo.cacheOrder[0])
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder[1:], id)
} else {
cacheRepo.cacheOrder = append(cacheRepo.cacheOrder, id)
}
cacheRepo.cache[id] = value
}
type cacheRunner struct {
active bool
cacher cache
throttle chan struct{}
requestLocks equalityLock
lock *sync.Mutex
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return cacheRunner{
active: true,
cacher: newCache(cacheSize),
throttle: make(chan struct{}, throttleSize),
requestLocks: newEqualityLock(),
lock: new(sync.Mutex),
}
}
func (runner cacheRunner) AddRequest(request Request) {
runner.throttle <- struct{}{}
runner.lock.Lock()
if runner.active == false {
<-runner.throttle
runner.lock.Unlock()
return
}
if runner.cacher.isCached(request.ID()) {
entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
<-runner.throttle
runner.lock.Unlock()
return
}
requestLock := runner.requestLocks.accuire(request.ID())
runner.lock.Unlock()
requestLock.Lock()
runner.lock.Lock()
if runner.cacher.isCached(request.ID()) {
entry := runner.cacher.get(request.ID())
request.SetResult(entry.value, entry.err)
<-runner.throttle
requestLock.Unlock()
runner.lock.Unlock()
return
}
- //result, err := request.Run()
+ result, err := request.Run()
if request.Cacheable() {
- runner.cacher.put(cacheEntry{nil, nil}, request.ID())
+ runner.cacher.put(cacheEntry{result, err}, request.ID())
}
<-runner.throttle
runner.lock.Unlock()
requestLock.Unlock()
}
func (runner cacheRunner) Stop() {
runner.lock.Lock()
runner.active = false
runner.lock.Unlock()
}

Здрасти,

  • Stop-а ти не изчаква завършването на вече започнати процеси - пусни си тестовете
  • 118-121 ред са с цел евентуално да може да може повече от една заявка да влезе в AddRequest ?
  • покрай което искам да кажа че, явно не е станало ясно, но една от основните идеи на Requester-а е че като се добавят две заявки които не са равни, и throttle-а е достатъчно голям - техните Run-ове ще бъдат извикани асинхрнонно.

п.п. Весело изкарване на Новата Година