Решение на HTTP сваляч от Ангел Ангелов

Обратно към всички решения

Към профила на Ангел Ангелов

Резултати

  • 8 точки от тестове
  • 0 бонус точки
  • 8 точки общо
  • 11 успешни тест(а)
  • 2 неуспешни тест(а)

Код

package main
import (
"sync"
)
// Request is a unit of work that can be executed by a Requester.
type Request interface {
// ID returns the request's identifier; requests with equal IDs are
// considered identical and may share a cached result.
ID() string
// Run executes the request and returns its result or an error.
Run() (result interface{}, err error)
// Cacheable reports whether the result may be stored and reused for
// later requests with the same ID.
Cacheable() bool
// SetResult hands a previously computed (cached) result to the request
// instead of running it.
SetResult(result interface{}, err error)
}
// Requester accepts requests for execution and can be stopped.
type Requester interface {
// AddRequest submits a request for execution (in this implementation
// it blocks until the request has completed or been served from cache).
AddRequest(request Request)
// Stop rejects further requests and waits for in-flight ones to finish.
Stop()
}
// requestResult holds the outcome of one executed request, keyed by
// the request ID so it can be found again in the cache.
type requestResult struct {
	result interface{}
	err    error
	id     string
}

// MyRequester is a Requester that throttles concurrent Run calls,
// caches cacheable results in a fixed-size ring buffer, and serializes
// requests sharing the same ID via per-ID mutexes.
type MyRequester struct {
	mapLocker      *sync.Mutex            // guards results and runningTasks
	throttleLocker chan struct{}          // semaphore bounding concurrent Run calls
	results        *buffer                // fixed-size result cache
	running        bool                   // cleared by Stop; NOTE(review): read/written without synchronization (races under -race)
	runningTasks   map[string]*sync.Mutex // per-ID locks serializing same-ID requests
	stopGroup      *sync.WaitGroup        // lets Stop wait for in-flight AddRequest calls
}

// buffer is a fixed-capacity ring cache of request results; once full,
// each push overwrites the oldest entry.
type buffer struct {
	data []*requestResult
	size int // number of valid entries in data, at most len(data)
	last int // slot index where the next push will write
}

// newBuffer returns an empty buffer holding at most maxSize results.
func newBuffer(maxSize int) *buffer {
	return &buffer{
		data: make([]*requestResult, maxSize),
	}
}

// push stores result in the ring, evicting the oldest entry when full.
func (b *buffer) push(result *requestResult) {
	b.data[b.last] = result
	b.last = (b.last + 1) % len(b.data)
	// Bug fix: count valid entries up to full capacity. The previous
	// `if b.size < b.last` comparison froze size once last wrapped to 0,
	// so entries written after the first wrap were never visible to get
	// (the cache-size test in the log times out because of this).
	if b.size < len(b.data) {
		b.size++
	}
}

// get returns the cached result with the given id, if present.
func (b *buffer) get(id string) (*requestResult, bool) {
	for i := 0; i < b.size; i++ {
		if b.data[i].id == id {
			return b.data[i], true
		}
	}
	return nil, false
}
// NewRequester creates a Requester with a result cache of cacheSize
// entries and at most throttleSize concurrently running requests.
func NewRequester(cacheSize int, throttleSize int) Requester {
	r := new(MyRequester)
	r.mapLocker = &sync.Mutex{}
	r.throttleLocker = make(chan struct{}, throttleSize)
	r.results = newBuffer(cacheSize)
	r.running = true
	r.runningTasks = make(map[string]*sync.Mutex)
	r.stopGroup = &sync.WaitGroup{}
	return r
}
// AddRequest executes the given request, blocking until it completes
// or is served from the cache. Requests sharing an ID are serialized
// by a per-ID mutex; a cacheable result is stored and handed to later
// requests with the same ID via SetResult. The number of concurrent
// Run calls is bounded by the throttle channel. After Stop, requests
// are dropped.
func (m *MyRequester) AddRequest(request Request) {
	// NOTE(review): m.running is read without synchronization while
	// Stop writes it concurrently; -race flags this. Fixing it needs
	// a shared guard on the field (mutex or sync/atomic).
	if !m.running {
		return
	}
	m.stopGroup.Add(1)
	defer m.stopGroup.Done()

	id := request.ID()

	// Look up or atomically create the per-ID lock. Bug fix: the old
	// code inserted into runningTasks *after* releasing mapLocker,
	// which was an unsynchronized map write and let two goroutines
	// create distinct locks for the same ID.
	m.mapLocker.Lock()
	locker, ok := m.runningTasks[id]
	if !ok {
		locker = &sync.Mutex{}
		m.runningTasks[id] = locker
	}
	m.mapLocker.Unlock()

	locker.Lock()

	m.mapLocker.Lock()
	result, ok := m.results.get(id)
	m.mapLocker.Unlock()
	if ok {
		// Cache hit: deliver the stored result without running.
		request.SetResult(result.result, result.err)
		locker.Unlock()
		return
	}

	if !m.running {
		// Stop was called while we waited for the per-ID lock.
		// Bug fix: the old code returned while still holding locker,
		// which deadlocked every goroutine queued on the same ID and
		// made Stop's WaitGroup wait hang forever (the stop-with-queue
		// test in the log times out because of this).
		locker.Unlock()
		return
	}

	result = &requestResult{id: id}

	// Throttle: at most cap(throttleLocker) Run calls at a time.
	m.throttleLocker <- struct{}{}
	result.result, result.err = request.Run()
	<-m.throttleLocker

	m.mapLocker.Lock()
	if request.Cacheable() {
		m.results.push(result)
	}
	locker.Unlock()
	delete(m.runningTasks, id)
	m.mapLocker.Unlock()
}
// Stop marks the requester as stopped so AddRequest rejects new
// requests, then blocks until all in-flight AddRequest calls finish.
// NOTE(review): m.running is written here without synchronization
// while AddRequest reads it concurrently -- a data race under -race;
// guarding it with a mutex or sync/atomic would fix it, but that
// requires a field change on MyRequester.
func (m *MyRequester) Stop() {
m.running = false
m.stopGroup.Wait()
}

Лог от изпълнението

PASS
ok  	_/tmp/d20160101-5892-orabhg	0.003s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.003s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.103s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.003s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.003s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.043s
panic: test timed out after 1s

goroutine 17 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e3338, 0x670e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-orabhg/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-orabhg.TestCacheSize(0xc82008e000)
	/tmp/d20160101-5892-orabhg/solution_test.go:299 +0xc02
testing.tRunner(0xc82008e000, 0x670eb0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d
exit status 2
FAIL	_/tmp/d20160101-5892-orabhg	1.005s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.043s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.053s
panic: test timed out after 1s

goroutine 5 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e3338, 0x670e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820062510)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-orabhg/_test/_testmain.go:78 +0x116

goroutine 20 [chan receive]:
_/tmp/d20160101-5892-orabhg.TestStopWithQueue(0xc8200a2000)
	/tmp/d20160101-5892-orabhg/solution_test.go:455 +0xa7c
testing.tRunner(0xc8200a2000, 0x670ef8)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d
exit status 2
FAIL	_/tmp/d20160101-5892-orabhg	1.005s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.033s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.053s
PASS
ok  	_/tmp/d20160101-5892-orabhg	0.113s

История (4 версии и 4 коментара)

Ангел обнови решението на 26.12.2015 16:06 (преди над 2 години)

+package main
+
+import (
+ "sync"
+)
+
+type Request interface {
+ ID() string
+ Run() (result interface{}, err error)
+ Cacheable() bool
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ AddRequest(request Request)
+ Stop()
+}
+
+type requestResult struct {
+ result interface{}
+ err error
+ cacheable bool
+ locker *sync.Mutex
+ id string
+}
+
+type MyRequester struct {
+ mapLocker *sync.Mutex
+ throttleLocker chan struct{}
+ results *buffer
+ running bool
+}
+
+type buffer struct {
+ data []*requestResult
+ size int
+ last int
+}
+
+func newBuffer(maxSize int) *buffer {
+ return &buffer{
+ data: make([]*requestResult, maxSize),
+ size: 0,
+ last: 0,
+ }
+}
+
+func (b *buffer) push(result *requestResult) {
+ b.data[b.last] = result
+ b.last = (b.last + 1) % cap(b.data)
+ if b.size < b.last {
+ b.size = b.last
+ }
+}
+
+func (b *buffer) get(id string) (*requestResult, bool) {
+ for i := 0; i < b.size; i++ {
+ if b.data[i].id == id {
+ return b.data[i], true
+ }
+ }
+ return nil, false
+}
+
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ return &MyRequester{
+ mapLocker: &sync.Mutex{},
+ throttleLocker: make(chan struct{}, throttleSize),
+ results: newBuffer(cacheSize),
+ running: true,
+ }
+}
+
+func (m *MyRequester) AddRequest(request Request) {
+ if !m.running {
+ return
+ }
+
+ m.throttleLocker <- struct{}{}
+
+ id := request.ID()
+
+ m.mapLocker.Lock()
+ result, ok := m.results.get(id)
+ if !ok {
+ result = &requestResult{
+ locker: &sync.Mutex{},
+ cacheable: false,
+ id: id,
+ }
+ m.results.push(result)
+ }
+ m.mapLocker.Unlock()
+
+ result.locker.Lock()
+ if result.cacheable {
+ request.SetResult(result.result, result.err)
+ } else {
+ result.result, result.err = request.Run()
+ result.cacheable = request.Cacheable()
+ }
+ result.locker.Unlock()
+
+ <-m.throttleLocker
+}
+
+func (m *MyRequester) Stop() {
+ m.running = false
+}

Весела Коледа.

Много добро количество локове. Принципно има и други начини, но и този става. В допълнение по темата: ако си ръннеш кода с '-race' и извикваш Stop и AddRequest ще ти каже че има race condition на m.running. Това не смятам че е проблем, но може да го оправиш.

Проблема е имплементацията на cache-а която нещо неработи :cry:.

Също така ако пазиш нещо в кеша преди да знаеш дали е кешируемо ще ти напълни кеша с неща които не са кешируеми и няма да имаш място за друго :wink:

btw, стойностите в go се инициализират на 0 така че ред 43,44 не са необходими

Весели празници и приятно кодене

Ангел обнови решението на 26.12.2015 22:45 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type requestResult struct {
- result interface{}
- err error
- cacheable bool
- locker *sync.Mutex
- id string
+ result interface{}
+ err error
+ id string
}
type MyRequester struct {
mapLocker *sync.Mutex
throttleLocker chan struct{}
results *buffer
running bool
+ runningTasks map[string]*sync.Mutex
}
type buffer struct {
data []*requestResult
size int
last int
}
func newBuffer(maxSize int) *buffer {
return &buffer{
data: make([]*requestResult, maxSize),
- size: 0,
- last: 0,
}
}
func (b *buffer) push(result *requestResult) {
b.data[b.last] = result
- b.last = (b.last + 1) % cap(b.data)
+ b.last = (b.last + 1) % len(b.data)
if b.size < b.last {
b.size = b.last
}
}
func (b *buffer) get(id string) (*requestResult, bool) {
for i := 0; i < b.size; i++ {
if b.data[i].id == id {
return b.data[i], true
}
}
return nil, false
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return &MyRequester{
mapLocker: &sync.Mutex{},
throttleLocker: make(chan struct{}, throttleSize),
results: newBuffer(cacheSize),
running: true,
+ runningTasks: make(map[string]*sync.Mutex),
}
}
func (m *MyRequester) AddRequest(request Request) {
if !m.running {
return
}
m.throttleLocker <- struct{}{}
id := request.ID()
m.mapLocker.Lock()
result, ok := m.results.get(id)
- if !ok {
- result = &requestResult{
- locker: &sync.Mutex{},
- cacheable: false,
- id: id,
- }
- m.results.push(result)
- }
- m.mapLocker.Unlock()
-
- result.locker.Lock()
- if result.cacheable {
+ if ok {
+ m.mapLocker.Unlock()
request.SetResult(result.result, result.err)
} else {
+ locker, ok := m.runningTasks[id]
+ if !ok {
+ locker = &sync.Mutex{}
+ m.runningTasks[id] = locker
+ }
+ m.mapLocker.Unlock()
+
+ locker.Lock()
+ result = &requestResult{
+ id: id,
+ }
result.result, result.err = request.Run()
- result.cacheable = request.Cacheable()
+ if request.Cacheable() {
+ m.results.push(result)
+ }
+ locker.Unlock()
}
- result.locker.Unlock()
<-m.throttleLocker
}
func (m *MyRequester) Stop() {
m.running = false
}

Защо има race condition? Не би ли трябвало работата с bool да е атомарна?

Също така, не съм сигурен дали разбирам условието: AddRequest трябва ли да блокира, докато се изпълнява Run() (както съм го направил в момента)? Stop() трябва ли да блокира, изчаквайки всички задачи, които рънват в момента?

П.П. проблемЪТ

Go race detector не е съгласен с твоето мнение(което аз подкрепям принципно). Не мога да намеря обяснение на какво точно може да се обърка.

Stop очевидно трябва да е блокиращ ако ще чака - което при теб не е така :).

AddRequest беше мислено да не е блокиращ, иначе щеше да връща резултат :).

Но понеже се оказа че специално за AddRequest никъде не пише че не е блокиращ и дали чака или не чака нещо, преправих тестовете да работят с блокиращ AddRequest.

п.п. buffer-а ти все още не работи :)

Ангел обнови решението на 31.12.2015 10:24 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type requestResult struct {
result interface{}
err error
id string
}
type MyRequester struct {
mapLocker *sync.Mutex
throttleLocker chan struct{}
results *buffer
running bool
runningTasks map[string]*sync.Mutex
+ stopGroup *sync.WaitGroup
}
type buffer struct {
data []*requestResult
size int
last int
}
func newBuffer(maxSize int) *buffer {
return &buffer{
data: make([]*requestResult, maxSize),
}
}
func (b *buffer) push(result *requestResult) {
b.data[b.last] = result
b.last = (b.last + 1) % len(b.data)
if b.size < b.last {
b.size = b.last
}
}
func (b *buffer) get(id string) (*requestResult, bool) {
for i := 0; i < b.size; i++ {
if b.data[i].id == id {
return b.data[i], true
}
}
return nil, false
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return &MyRequester{
mapLocker: &sync.Mutex{},
throttleLocker: make(chan struct{}, throttleSize),
results: newBuffer(cacheSize),
running: true,
runningTasks: make(map[string]*sync.Mutex),
+ stopGroup: &sync.WaitGroup{},
}
}
func (m *MyRequester) AddRequest(request Request) {
if !m.running {
return
}
+ m.stopGroup.Add(1)
+
m.throttleLocker <- struct{}{}
id := request.ID()
m.mapLocker.Lock()
result, ok := m.results.get(id)
if ok {
m.mapLocker.Unlock()
request.SetResult(result.result, result.err)
} else {
locker, ok := m.runningTasks[id]
if !ok {
locker = &sync.Mutex{}
m.runningTasks[id] = locker
}
+ locker.Lock()
m.mapLocker.Unlock()
- locker.Lock()
result = &requestResult{
id: id,
}
+
+ m.mapLocker.Lock()
result.result, result.err = request.Run()
if request.Cacheable() {
m.results.push(result)
}
locker.Unlock()
+ delete(m.runningTasks, id)
+ m.mapLocker.Unlock()
}
<-m.throttleLocker
+
+ m.stopGroup.Done()
}
func (m *MyRequester) Stop() {
m.running = false
+ m.stopGroup.Wait()
}

Здрасти,

  • Част от идеята на Requester-а че ако добавиш две неравни заявки, те ще бъдат изпълнение асинхронно(ако throttle-а е достатъчно висок). При теб тези локове гарантират че само една заявка ще бъде Run-вана.
  • 49, 50 ред изглежда грешно, тестовете потвърждават че нещо не е съвсем наред
  • Изтегли си последните тестове и рънвай тях

п.п. Приятно изкарване на Новата Година

Ангел обнови решението на 31.12.2015 17:47 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
type requestResult struct {
result interface{}
err error
id string
}
type MyRequester struct {
mapLocker *sync.Mutex
throttleLocker chan struct{}
results *buffer
running bool
runningTasks map[string]*sync.Mutex
stopGroup *sync.WaitGroup
}
type buffer struct {
data []*requestResult
size int
last int
}
func newBuffer(maxSize int) *buffer {
return &buffer{
data: make([]*requestResult, maxSize),
}
}
func (b *buffer) push(result *requestResult) {
b.data[b.last] = result
b.last = (b.last + 1) % len(b.data)
if b.size < b.last {
b.size = b.last
}
}
func (b *buffer) get(id string) (*requestResult, bool) {
for i := 0; i < b.size; i++ {
if b.data[i].id == id {
return b.data[i], true
}
}
return nil, false
}
func NewRequester(cacheSize int, throttleSize int) Requester {
return &MyRequester{
mapLocker: &sync.Mutex{},
throttleLocker: make(chan struct{}, throttleSize),
results: newBuffer(cacheSize),
running: true,
runningTasks: make(map[string]*sync.Mutex),
stopGroup: &sync.WaitGroup{},
}
}
func (m *MyRequester) AddRequest(request Request) {
if !m.running {
return
}
m.stopGroup.Add(1)
+ defer m.stopGroup.Done()
- m.throttleLocker <- struct{}{}
-
id := request.ID()
m.mapLocker.Lock()
+ locker, ok := m.runningTasks[id]
+ m.mapLocker.Unlock()
+ if !ok {
+ locker = &sync.Mutex{}
+ m.runningTasks[id] = locker
+ }
+ locker.Lock()
+
+ m.mapLocker.Lock()
result, ok := m.results.get(id)
+ m.mapLocker.Unlock()
if ok {
- m.mapLocker.Unlock()
request.SetResult(result.result, result.err)
+ locker.Unlock()
} else {
- locker, ok := m.runningTasks[id]
- if !ok {
- locker = &sync.Mutex{}
- m.runningTasks[id] = locker
- }
- locker.Lock()
- m.mapLocker.Unlock()
-
result = &requestResult{
id: id,
}
- m.mapLocker.Lock()
+ if !m.running {
+ return
+ }
+
+ m.throttleLocker <- struct{}{}
result.result, result.err = request.Run()
+ <-m.throttleLocker
+
+ m.mapLocker.Lock()
if request.Cacheable() {
m.results.push(result)
}
locker.Unlock()
delete(m.runningTasks, id)
m.mapLocker.Unlock()
}
-
- <-m.throttleLocker
-
- m.stopGroup.Done()
}
func (m *MyRequester) Stop() {
m.running = false
m.stopGroup.Wait()
}