Solution to HTTP Downloader by Диана Генева



Results

  • 10 points from tests
  • 1 point deducted
  • 9 points total
  • 13 passed test(s)
  • 0 failed test(s)

Code

package main

import "sync"

type Request interface {
    ID() string
    Run() (result interface{}, err error)
    Cacheable() bool
    SetResult(result interface{}, err error)
}

type Requester interface {
    AddRequest(request Request)
    Stop()
}

// Result is a cache entry; id is an insertion counter used to find the oldest entry.
type Result struct {
    id     int
    result interface{}
    err    error
}

type MyRequester struct {
    lastCashId      int
    group           *sync.WaitGroup
    Cache           map[string]Result
    CurrentRequests map[string]chan struct{}
    throttle        chan struct{}
    cacheSize       int
    stopped         bool
    locker          chan struct{} // one-slot channel used as a mutex
}

// AddCache stores a result, evicting the entry with the smallest id when the cache is full.
func (r *MyRequester) AddCache(id string, result interface{}, err error) {
    if len(r.Cache) == r.cacheSize {
        min := int(^uint(0) >> 1)
        var oldestCache string
        for key, value := range r.Cache {
            if value.id < min {
                min = value.id
                oldestCache = key
            }
        }
        delete(r.Cache, oldestCache)
    }
    r.Cache[id] = Result{id: r.lastCashId, result: result, err: err}
    r.lastCashId++
}

func (r *MyRequester) AddRequest(request Request) {
    r.locker <- struct{}{}
    if r.stopped {
        <-r.locker
        return
    }
    <-r.locker
    r.group.Add(1)
    defer r.group.Done()
    r.throttle <- struct{}{} // limit the number of concurrently running requests
    id := request.ID()
    r.locker <- struct{}{}
    _, ok := r.CurrentRequests[id]
    <-r.locker
    if !ok {
        r.CurrentRequests[id] = make(chan struct{}, 1)
    }
    r.CurrentRequests[id] <- struct{}{} // serialise requests with the same ID
    r.locker <- struct{}{}
    result, ok := r.Cache[id]
    <-r.locker
    if ok {
        request.SetResult(result.result, result.err)
    } else if !r.stopped {
        result, err := request.Run()
        if request.Cacheable() {
            r.locker <- struct{}{}
            r.AddCache(id, result, err)
            <-r.locker
        }
    }
    <-r.CurrentRequests[id]
    <-r.throttle
}

func (r *MyRequester) Stop() {
    r.locker <- struct{}{}
    r.stopped = true
    <-r.locker
    r.group.Wait()
}

func NewRequester(cacheSize int, throttleSize int) Requester {
    return &MyRequester{
        throttle:        make(chan struct{}, throttleSize),
        Cache:           make(map[string]Result),
        CurrentRequests: make(map[string]chan struct{}),
        cacheSize:       cacheSize,
        locker:          make(chan struct{}, 1),
        stopped:         false,
        group:           &sync.WaitGroup{},
    }
}

Execution log

PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.003s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.004s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.103s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.003s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.003s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.044s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.005s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.044s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.053s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.214s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.033s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.053s
PASS
ok  	_/tmp/d20160101-5892-yaeb32	0.114s

History (3 versions and 6 comments)

Диана updated the solution on 26.12.2015 14:33 (more than 2 years ago)

+package main
+
+type Request interface {
+ ID() string
+ Run() (result interface{}, err error)
+ Cacheable() bool
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ AddRequest(request Request)
+ Stop()
+}
+
+type Result struct {
+ id int
+ result interface{}
+ err error
+}
+
+type MyRequester struct {
+ lastCashId int
+ currentCacheSize int
+ Cache map[string]Result
+ CurrentRequests map[string]chan struct{}
+ throttle chan struct{}
+ cacheSize int
+ stopped bool
+ locker chan struct{}
+}
+
+func (r MyRequester) AddCache(id string, result interface{}, err error) {
+ if r.currentCacheSize == r.cacheSize {
+ min := int(^uint(0) >> 1)
+ var oldestCache string
+ for key, value := range r.Cache {
+ if value.id < min {
+ min = value.id
+ oldestCache = key
+ }
+ }
+ delete(r.Cache, oldestCache)
+ } else {
+ r.currentCacheSize++
+ r.Cache[id] = Result{id: r.lastCashId, result: result, err: err}
+ r.lastCashId++
+ }
+}
+
+func (r MyRequester) AddRequest(request Request) {
+ if r.stopped {
+ return
+ }
+ r.throttle <- struct{}{}
+ id := request.ID()
+ if _, ok := r.CurrentRequests[id]; !ok {
+ r.CurrentRequests[id] = make(chan struct{}, 1)
+ }
+ r.CurrentRequests[id] <- struct{}{}
+ if result, ok := r.Cache[id]; ok {
+ request.SetResult(result.result, result.err)
+ } else {
+ result, err := request.Run()
+ if request.Cacheable() {
+ r.locker <- struct{}{}
+ r.AddCache(id, result, err)
+ <-r.locker
+ }
+ }
+ <-r.CurrentRequests[id]
+ <-r.throttle
+}
+
+func (r MyRequester) Stop() {
+ r.stopped = true
+}
+
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ return MyRequester{throttle: make(chan struct{}, throttleSize),
+ Cache: make(map[string]Result),
+ CurrentRequests: make(map[string]chan struct{}),
+ cacheSize: cacheSize,
+ locker: make(chan struct{}, 1),
+ stopped: false,
+ }
+}

Merry Christmas :)

Defining all the methods on MyRequester rather than on *MyRequester definitely makes Stop and AddCache a bit buggy.
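
As a minimal illustration of the problem (hypothetical type, not taken from the solution): a value receiver operates on a copy, so a flag like stopped never changes on the caller's struct.

package main

import "fmt"

type toy struct{ stopped bool }

// Value receiver: mutates a copy; the caller's value stays unchanged.
func (t toy) StopByValue() { t.stopped = true }

// Pointer receiver: mutates the original value.
func (t *toy) StopByPointer() { t.stopped = true }

func main() {
    t := toy{}
    t.StopByValue()
    fmt.Println(t.stopped) // false: only the copy was modified
    t.StopByPointer()
    fmt.Println(t.stopped) // true
}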

You have a number of race conditions around the map accesses.
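
A minimal sketch (made-up names, not part of the submission) of one way to avoid them: route every read and write of the shared map through a single sync.Mutex, so the check-if-missing and the insert happen atomically. It assumes the surrounding package imports "sync" and that slots is initialised with make.

// requestRegistry serialises all access to the per-ID channels.
type requestRegistry struct {
    mu    sync.Mutex
    slots map[string]chan struct{}
}

// slot returns the channel for id, creating it atomically if it does not exist yet.
func (rr *requestRegistry) slot(id string) chan struct{} {
    rr.mu.Lock()
    defer rr.mu.Unlock()
    ch, ok := rr.slots[id]
    if !ok {
        ch = make(chan struct{}, 1)
        rr.slots[id] = ch
    }
    return ch
}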

Your whole Cache implementation looks strange: isn't currentCacheSize always just len(Cache)? And what happens if we add a new value while the Cache is full?

Maybe it wouldn't be a bad idea to put it in a separate type?
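
For illustration only, a sketch of what such a type could look like (invented names, reusing the Result type from the solution). With len(items) as the size there is no separate counter to keep in sync, and a full cache still accepts a new value after evicting the oldest entry.

// boundedCache keeps at most size entries, evicting the entry with the
// smallest insertion id before storing into a full cache.
type boundedCache struct {
    size   int
    nextID int
    items  map[string]Result
}

func newBoundedCache(size int) *boundedCache {
    return &boundedCache{size: size, items: make(map[string]Result)}
}

func (c *boundedCache) Add(id string, result interface{}, err error) {
    if len(c.items) == c.size {
        oldestKey, oldestID := "", int(^uint(0)>>1)
        for key, value := range c.items {
            if value.id < oldestID {
                oldestID, oldestKey = value.id, key
            }
        }
        delete(c.items, oldestKey)
    }
    c.items[id] = Result{id: c.nextID, result: result, err: err}
    c.nextID++
}

func (c *boundedCache) Get(id string) (Result, bool) {
    r, ok := c.items[id]
    return r, ok
}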

Also (something I apparently forgot to write down, and I will see whether I can fix it in the tests rather than in the problem statement): AddRequest is meant to be asynchronous. That is:

r.AddRequest(fr)  // does not block until it has finished
r.AddRequest(fr2) // does not block until it has finished
r.AddRequest(fr3) // does not block until it has finished
// at this point fr.Run may not have been executed yet, only queued.
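
One way to get that behaviour (just a sketch under the assumption that the rest of the bookkeeping stays as in the solution, not meant to be pasted next to the existing method): register the request synchronously, then do the throttling and the actual Run in a goroutine so that AddRequest returns immediately.

// Sketch only: AddRequest returns right away and the work happens in the background.
func (r *MyRequester) AddRequest(request Request) {
    r.group.Add(1) // registered synchronously so Stop can still wait for it
    go func() {
        defer r.group.Done()
        r.throttle <- struct{}{}        // acquire a throttle slot inside the goroutine
        defer func() { <-r.throttle }() // release it when the request is done
        // ... check the cache and call request.Run() as before ...
    }()
}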

I will try to fix this in the tests rather than in the problem statement, so that people who did not understand it this way are not penalised :). If I don't manage to, it's still good for you to know :).

Happy holidays and happy coding :).

Диана updated the solution on 30.12.2015 14:34 (more than 2 years ago)

package main
+import "sync"
+
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type Result struct {
id int
result interface{}
err error
}
type MyRequester struct {
- lastCashId int
- currentCacheSize int
- Cache map[string]Result
- CurrentRequests map[string]chan struct{}
- throttle chan struct{}
- cacheSize int
- stopped bool
- locker chan struct{}
+ lastCashId int
+ group *sync.WaitGroup
+ Cache map[string]Result
+ CurrentRequests map[string]chan struct{}
+ throttle chan struct{}
+ cacheSize int
+ stopped bool
+ locker chan struct{}
}
-func (r MyRequester) AddCache(id string, result interface{}, err error) {
- if r.currentCacheSize == r.cacheSize {
+func (r *MyRequester) AddCache(id string, result interface{}, err error) {
+ if len(r.Cache) == r.cacheSize {
min := int(^uint(0) >> 1)
var oldestCache string
for key, value := range r.Cache {
if value.id < min {
min = value.id
oldestCache = key
}
}
delete(r.Cache, oldestCache)
- } else {
- r.currentCacheSize++
- r.Cache[id] = Result{id: r.lastCashId, result: result, err: err}
- r.lastCashId++
}
+ r.Cache[id] = Result{id: r.lastCashId, result: result, err: err}
+ r.lastCashId++
}
-func (r MyRequester) AddRequest(request Request) {
+func (r *MyRequester) AddRequest(request Request) {
if r.stopped {
return
}
+
+ r.group.Add(1)
r.throttle <- struct{}{}
id := request.ID()
if _, ok := r.CurrentRequests[id]; !ok {
r.CurrentRequests[id] = make(chan struct{}, 1)
}
r.CurrentRequests[id] <- struct{}{}
- if result, ok := r.Cache[id]; ok {
+ r.locker <- struct{}{}
+ result, ok := r.Cache[id]
+ <-r.locker
+ if ok {
request.SetResult(result.result, result.err)
} else {
result, err := request.Run()
if request.Cacheable() {
r.locker <- struct{}{}
r.AddCache(id, result, err)
<-r.locker
}
}
<-r.CurrentRequests[id]
<-r.throttle
+ r.group.Done()
}
-func (r MyRequester) Stop() {
+func (r *MyRequester) Stop() {
r.stopped = true
+ r.group.Wait()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
- return MyRequester{throttle: make(chan struct{}, throttleSize),
+ return &MyRequester{throttle: make(chan struct{}, throttleSize),
Cache: make(map[string]Result),
CurrentRequests: make(map[string]chan struct{}),
cacheSize: cacheSize,
locker: make(chan struct{}, 1),
stopped: false,
+ group: &sync.WaitGroup{},
}
}

Диана updated the solution on 31.12.2015 16:45 (more than 2 years ago)

package main
import "sync"
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type Result struct {
id int
result interface{}
err error
}
type MyRequester struct {
lastCashId int
group *sync.WaitGroup
Cache map[string]Result
CurrentRequests map[string]chan struct{}
throttle chan struct{}
cacheSize int
stopped bool
locker chan struct{}
}
func (r *MyRequester) AddCache(id string, result interface{}, err error) {
if len(r.Cache) == r.cacheSize {
min := int(^uint(0) >> 1)
var oldestCache string
for key, value := range r.Cache {
if value.id < min {
min = value.id
oldestCache = key
}
}
delete(r.Cache, oldestCache)
}
r.Cache[id] = Result{id: r.lastCashId, result: result, err: err}
r.lastCashId++
}
func (r *MyRequester) AddRequest(request Request) {
+ r.locker <- struct{}{}
if r.stopped {
+ <-r.locker
return
}
+ <-r.locker
r.group.Add(1)
+ defer r.group.Done()
r.throttle <- struct{}{}
id := request.ID()
- if _, ok := r.CurrentRequests[id]; !ok {
+
+ r.locker <- struct{}{}
+ _, ok := r.CurrentRequests[id]
+ <-r.locker
+
+ if !ok {
r.CurrentRequests[id] = make(chan struct{}, 1)
}
r.CurrentRequests[id] <- struct{}{}
r.locker <- struct{}{}
result, ok := r.Cache[id]
<-r.locker
if ok {
request.SetResult(result.result, result.err)
- } else {
+ } else if !r.stopped {
result, err := request.Run()
if request.Cacheable() {
r.locker <- struct{}{}
r.AddCache(id, result, err)
<-r.locker
}
}
<-r.CurrentRequests[id]
<-r.throttle
- r.group.Done()
}
func (r *MyRequester) Stop() {
+ r.locker <- struct{}{}
r.stopped = true
+ <-r.locker
+
r.group.Wait()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return &MyRequester{throttle: make(chan struct{}, throttleSize),
Cache: make(map[string]Result),
CurrentRequests: make(map[string]chan struct{}),
cacheSize: cacheSize,
locker: make(chan struct{}, 1),
stopped: false,
group: &sync.WaitGroup{},
}
}

One point is deducted because you sometimes fail a test; it just did not happen to fail in the particular test run these results were taken from:

=== RUN   TestCacheSize
fatal error: all goroutines are asleep - deadlock!

goroutine 1 [chan receive]:
testing.RunTests(0x5e4978, 0x672e20, 0xd, 0xd, 0x1)
    /goroot/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc820045ef8, 0xc82000e660)
    /goroot/src/testing/testing.go:494 +0x70
main.main()
    _/diana/_test/_testmain.go:78 +0x116

goroutine 34 [chan receive]:
_/diana.TestCacheSize(0xc8200b2000)
    /diana/solution_test.go:299 +0xba4
testing.tRunner(0xc8200b2000, 0x672eb0)
    /goroot/src/testing/testing.go:456 +0x98
created by testing.RunTests
    /goroot/src/testing/testing.go:561 +0x86d
exit status 2

In addition, you have a race condition that is sometimes caught by Go's race detector :)

=== RUN   TestThrottleSize
==================
WARNING: DATA RACE
Read by goroutine 32:
  runtime.mapaccess1_faststr()
      /goroot/src/runtime/hashmap_fast.go:179 +0x0
  _/diana.(*MyRequester).AddRequest()
      /diana/diana.go:84 +0x66a

Previous write by goroutine 33:
  runtime.mapassign1()
      /goroot/src/runtime/hashmap.go:411 +0x0
  _/diana.(*MyRequester).AddRequest()
      /diana/diana.go:68 +0x396

Goroutine 32 (running) created at:
  _/diana.TestThrottleSize()
      /diana/solution_test.go:353 +0x94f
  testing.tRunner()
      /goroot/src/testing/testing.go:456 +0xdc

Goroutine 33 (running) created at:
  _/diana.TestThrottleSize()
      /diana/solution_test.go:354 +0x9be
  testing.tRunner()
      /goroot/src/testing/testing.go:456 +0xdc
==================
--- PASS: TestThrottleSize (0.04s)