Решение на HTTP сваляч от Даниел Тасков

Обратно към всички решения

Към профила на Даниел Тасков

Резултати

  • 6 точки от тестове
  • 0 бонус точки
  • 6 точки общо
  • 8 успешни тест(а)
  • 5 неуспешни тест(а)

Код

package main
import "sync"
// empty is a zero-size token sent on the throttling channel to occupy a slot.
type empty struct{}
// Request is a unit of work identified by ID. Identical IDs are treated as
// the same request for caching purposes.
type Request interface {
// ID returns the identifier used as the cache/deduplication key.
ID() string
// Run executes the request and produces its result.
Run() (result interface{}, err error)
// Cacheable reports whether the result may be stored in the cache.
Cacheable() bool
// SetResult delivers a previously computed (cached) result to the request.
SetResult(result interface{}, err error)
}
// Requester accepts requests for processing and can be stopped.
type Requester interface {
// AddRequest submits a request for processing.
AddRequest(request Request)
// Stop makes the requester ignore subsequent requests.
Stop()
}
// NewRequester builds a Requester backed by a ring-buffer cache of
// cacheSize entries, throttling execution to at most throttleSize
// concurrently running requests.
func NewRequester(cacheSize int, throttleSize int) Requester {
	return &myRequester{
		cache:      newCache(cacheSize),
		threshold:  make(chan empty, throttleSize),
		processing: make(map[string]*sync.Mutex),
		running:    true,
	}
}
// myRequester implements Requester with a ring-buffer result cache, a
// channel-based throttle, and per-ID mutexes that serialize identical
// requests.
type myRequester struct {
cache cache // results of cacheable requests, keyed by request ID
threshold chan empty // semaphore bounding concurrent Run calls
processing map[string]*sync.Mutex // per-request-ID serialization locks
running bool // false after Stop; new requests are then ignored
requests sync.WaitGroup // in-flight requests, waited on by Stop
sync.Mutex // guards running and the processing map
}
// AddRequest processes a request synchronously. Requests with the same
// ID are serialized through a per-ID mutex, so a duplicate arriving
// while the first is still running waits and then (if the result was
// cacheable) gets the cached result instead of re-running. Run calls
// are throttled by the threshold channel. After Stop the call is a
// no-op.
func (mr *myRequester) AddRequest(request Request) {
	// Register with the wait group under the same lock that guards
	// running: Stop either observes this request in the group, or this
	// call observes running == false and bails out. The original did
	// Add(1) after an unlocked check, so Stop's Wait could return
	// while this request was still in flight.
	mr.Lock()
	if !mr.running {
		mr.Unlock()
		return
	}
	mr.requests.Add(1)
	mr.Unlock()
	defer mr.requests.Done()

	id := request.ID()

	// Fetch (or create) the per-ID mutex, then release mr's own lock
	// BEFORE blocking on it. The original blocked on processing[id]
	// while still holding mr.Mutex, which deadlocks: the goroutine
	// finishing the same ID needs mr.Mutex in order to release
	// processing[id].
	mr.Lock()
	idLock, ok := mr.processing[id]
	if !ok {
		idLock = new(sync.Mutex)
		mr.processing[id] = idLock
	}
	mr.Unlock()

	idLock.Lock()
	defer idLock.Unlock()

	if v, ok := mr.cache.get(id); ok {
		request.SetResult(v.val(), v.err())
		return
	}

	// Throttle: at most cap(threshold) Run calls execute concurrently.
	mr.threshold <- empty{}
	val, err := request.Run()
	<-mr.threshold

	if request.Cacheable() {
		mr.cache.cache(id, newCacheable(val, err))
	}
}
// Stop marks the requester as stopped so subsequent AddRequest calls
// become no-ops, then blocks until every request already counted in
// the wait group has finished.
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
mr.requests.Wait()
}
// cache maps request IDs to stored results.
type cache interface {
// get returns the entry for id and whether it exists.
get(id string) (cacheable, bool)
// cache stores c under id, possibly evicting an older entry.
cache(id string, c cacheable)
}
// newCache creates a ring-buffer backed cache that holds at most
// cacheSize entries.
func newCache(cacheSize int) cache {
	return &buffer{
		indexes:  make(map[string]int),
		ids:      make(map[int]string),
		buffered: make([]bufferable, cacheSize),
		last:     0,
		size:     cacheSize,
	}
}
// buffer is a fixed-size ring cache. Slot last is the next one to be
// written (and therefore evicted); indexes maps id -> slot while ids
// maps slot -> id so an evicted entry can be unlinked from indexes.
type buffer struct {
indexes map[string]int // request ID -> slot in buffered
ids map[int]string // slot in buffered -> request ID
buffered []bufferable // stored entries; len(buffered) == size
last int // next ring slot to write/evict
size int // capacity of the ring
sync.Mutex // guards all fields above
}
// get returns a copy of the cached entry for id, or (nil, false) when
// id is not present. Safe for concurrent use.
func (b *buffer) get(id string) (cacheable, bool) {
	b.Lock()
	defer b.Unlock()

	slot, ok := b.indexes[id]
	if !ok {
		return nil, false
	}
	// Copy the entry out so the returned pointer does not alias the
	// ring slot, which may be overwritten after the lock is released.
	entry := b.buffered[slot]
	return &entry, true
}
// cache stores c under id, evicting whatever occupies the next ring
// slot. If id is already cached its entry is updated in place. Safe
// for concurrent use; a zero-sized buffer stores nothing.
func (b *buffer) cache(id string, c cacheable) {
	b.Lock()
	defer b.Unlock()

	// Guard: with size == 0 there is nothing to store, and the ring
	// advance below would panic with an integer divide by zero.
	if b.size == 0 {
		return
	}

	// If id already has a slot, overwrite it in place. The original
	// always consumed a fresh slot, leaving the stale ids[oldslot]
	// entry pointing at id; evicting that old slot later deleted the
	// NEW index for id and corrupted the cache.
	if i, ok := b.indexes[id]; ok {
		b.buffered[i] = bufferable{c.val(), c.err()}
		return
	}

	// Unlink the entry currently occupying the next slot.
	oldid := b.ids[b.last]
	delete(b.ids, b.last)
	delete(b.indexes, oldid)

	b.ids[b.last] = id
	b.indexes[id] = b.last
	b.buffered[b.last] = bufferable{c.val(), c.err()}
	b.last = (b.last + 1) % b.size
}
// cacheable is the read-only view of a stored request outcome: the
// result value and the error Run produced alongside it.
type cacheable interface {
	val() interface{}
	err() error
}

// newCacheable wraps a result/error pair in a cacheable value.
func newCacheable(val interface{}, err error) cacheable {
	entry := bufferable{value: val, error: err}
	return &entry
}

// bufferable is the concrete cacheable stored in the ring buffer.
type bufferable struct {
	value interface{}
	error error
}

// val returns the stored result value.
func (bf *bufferable) val() interface{} {
	return bf.value
}

// err returns the stored error.
func (bf *bufferable) err() error {
	return bf.error
}

Лог от изпълнението

▸ Покажи лога

История (3 версии и 5 коментара)

Даниел обнови решението на 25.12.2015 17:24 (преди над 2 години)

▸ Покажи разликите
+package main
+
+import "sync"
+
+type Request interface {
+ ID() string
+ Run() (result interface{}, err error)
+ Cacheable() bool
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ AddRequest(request Request)
+ Stop()
+}
+
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ r := new(myRequester)
+ r.cache = newCache(cacheSize)
+ r.threshold = make(semaphore, throttleSize)
+ r.running = true
+ return r
+}
+
+type empty struct{}
+type semaphore chan empty
+
+type myRequester struct {
+ cache cache
+ threshold semaphore
+ running bool
+ sync.Mutex
+}
+
+func (mr *myRequester) AddRequest(request Request) {
+ go func() {
+ mr.threshold <- empty{}
+ mr.Lock()
+ running := mr.running
+ mr.Unlock()
+ if running {
+ id := request.ID()
+ cached := mr.cache.contains(id)
+ if cached {
+ request.SetResult(mr.cache.value(id).val(), mr.cache.value(id).err())
+ } else if val, err := request.Run(); request.Cacheable() {
+ mr.cache.cache(id, &bufferable{val, err})
+ }
+ }
+ <-mr.threshold
+ }()
+}
+
+func (mr *myRequester) Stop() {
+ mr.Lock()
+ mr.running = false
+ mr.Unlock()
+}
+
+type cache interface {
+ contains(id string) bool
+ value(id string) cacheable
+ cache(id string, c cacheable)
+}
+
+func newCache(cacheSize int) cache {
+ buf := new(buffer)
+ buf.indexes = map[string]int{}
+ buf.ids = map[int]string{}
+ buf.buffered = make([]bufferable, cacheSize, cacheSize)
+ buf.last = 0
+ buf.size = cacheSize
+ return buf
+}
+
+type buffer struct {
+ indexes map[string]int
+ ids map[int]string
+ buffered []bufferable
+ last int
+ size int
+ sync.Mutex
+}
+
+func (b *buffer) contains(id string) bool {
+ b.Lock()
+ defer b.Unlock()
+ _, ok := b.indexes[id]
+ return ok
+}
+
+func (b *buffer) value(id string) cacheable {
+ b.Lock()
+ defer b.Unlock()
+ val := b.buffered[b.indexes[id]]
+ return &val
+}
+
+func (b *buffer) cache(id string, c cacheable) {
+ b.Lock()
+ defer b.Unlock()
+ oldid := b.ids[b.last]
+ delete(b.ids, b.last)
+ delete(b.indexes, oldid)
+
+ b.ids[b.last] = id
+ b.indexes[id] = b.last
+ b.buffered[b.last] = bufferable{c.val(), c.err()}
+ b.last++
+ b.last %= b.size
+}
+
+type cacheable interface {
+ val() interface{}
+ err() error
+}
+
+type bufferable struct {
+ value interface{}
+ error error
+}
+
+func (b *bufferable) val() interface{} {
+ return b.value
+}
+
+func (b *bufferable) err() error {
+ return b.error
+}

Весела Коледа и Благодаря за коледния Подарък, но:

  1. Не хващаш случая, в който се подават две еднакви заявки една след друга и към момента на подаване на втората първата още не е свършила.
  2. throttleSize-ът е за изпълняваните заявки - тези, на които им се вика Run.
  3. chan struct {} е толкова познато на всички go програмисти че тези типове които си направил са напълно ненужни.

Иначе ми харесва как си имаш свой си интерфейс и си го имплементираш(cache/buffer).

Весели Празници и приятно писане :smile:

Даниел обнови решението на 26.12.2015 21:24 (преди над 2 години)

▸ Покажи разликите
package main
import "sync"
+type empty struct{}
+
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
r := new(myRequester)
r.cache = newCache(cacheSize)
- r.threshold = make(semaphore, throttleSize)
+ r.threshold = make(chan empty, throttleSize)
r.running = true
+ r.processing = map[string]empty{}
return r
}
-type empty struct{}
-type semaphore chan empty
-
type myRequester struct {
- cache cache
- threshold semaphore
- running bool
+ cache cache
+ threshold chan empty
+ running bool
+ processing map[string]empty
sync.Mutex
}
func (mr *myRequester) AddRequest(request Request) {
- go func() {
- mr.threshold <- empty{}
+ mr.Lock()
+ running := mr.running
+ mr.Unlock()
+
+ if running {
+ id := request.ID()
+ mr.waitToProcessRequest(id)
+
+ if v, ok := mr.cache.get(id); ok {
+ request.SetResult(v.val(), v.err())
+ } else {
+ mr.threshold <- empty{}
+ val, err := request.Run()
+ <-mr.threshold
+ if request.Cacheable() {
+ mr.cache.cache(id, newCacheable(val, err))
+ }
+ }
mr.Lock()
- running := mr.running
+ delete(mr.processing, id)
mr.Unlock()
- if running {
- id := request.ID()
- cached := mr.cache.contains(id)
- if cached {
- request.SetResult(mr.cache.value(id).val(), mr.cache.value(id).err())
- } else if val, err := request.Run(); request.Cacheable() {
- mr.cache.cache(id, &bufferable{val, err})
- }
+ }
+}
+
+func (mr *myRequester) waitToProcessRequest(id string) {
+ for ok := true; ok; {
+ mr.Lock()
+ if _, ok = mr.processing[id]; !ok {
+ mr.processing[id] = empty{}
}
- <-mr.threshold
- }()
+ mr.Unlock()
+ }
}
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
}
type cache interface {
- contains(id string) bool
- value(id string) cacheable
+ get(id string) (cacheable, bool)
cache(id string, c cacheable)
}
func newCache(cacheSize int) cache {
buf := new(buffer)
buf.indexes = map[string]int{}
buf.ids = map[int]string{}
buf.buffered = make([]bufferable, cacheSize, cacheSize)
buf.last = 0
buf.size = cacheSize
+
return buf
}
type buffer struct {
indexes map[string]int
ids map[int]string
buffered []bufferable
last int
size int
sync.Mutex
}
-func (b *buffer) contains(id string) bool {
+func (b *buffer) get(id string) (cacheable, bool) {
b.Lock()
defer b.Unlock()
- _, ok := b.indexes[id]
- return ok
-}
-func (b *buffer) value(id string) cacheable {
- b.Lock()
- defer b.Unlock()
- val := b.buffered[b.indexes[id]]
- return &val
+ if i, ok := b.indexes[id]; ok {
+ val := b.buffered[i]
+ return &val, true
+ }
+ return nil, false
}
func (b *buffer) cache(id string, c cacheable) {
b.Lock()
defer b.Unlock()
+
oldid := b.ids[b.last]
delete(b.ids, b.last)
delete(b.indexes, oldid)
b.ids[b.last] = id
b.indexes[id] = b.last
b.buffered[b.last] = bufferable{c.val(), c.err()}
b.last++
b.last %= b.size
}
type cacheable interface {
val() interface{}
err() error
+}
+
+func newCacheable(val interface{}, err error) cacheable {
+ return &bufferable{val, err}
}
type bufferable struct {
value interface{}
error error
}
func (b *bufferable) val() interface{} {
return b.value
}
func (b *bufferable) err() error {
return b.error
}

Весела Коледа! Иначее страхотен коледен подарък, няма що -.-

Оправих 1. и 2., аа за 3. махнах type semaphore chan empty, но type empty struct{} го оставих, защото са ми много грозни тези struct{}{} навсякъде, а и доколкото виждам е доста използвано и из Go обществото, или ? : )

Добър вечер,

На бързо и градус:

  1. какво се случва във waitToProcessRequest ако се получат два request-а за едно и също ID и request-а е много бавен(минути)?
  2. Изглежда ми че throttle-ването ти не е на изпълнение а на кеширане
  3. Прочети условието на Stop и първия ми коментар във форума, този в отговор на твоя въпрос.

Лека вечер, приятно кодене и Весела Нова Година.

Даниел обнови решението на 30.12.2015 01:26 (преди над 2 години)

▸ Покажи разликите
package main
import "sync"
type empty struct{}
type Request interface {
ID() string
Run() (result interface{}, err error)
Cacheable() bool
SetResult(result interface{}, err error)
}
type Requester interface {
AddRequest(request Request)
Stop()
}
func NewRequester(cacheSize int, throttleSize int) Requester {
r := new(myRequester)
r.cache = newCache(cacheSize)
r.threshold = make(chan empty, throttleSize)
+ r.processing = map[string]*sync.Mutex{}
r.running = true
- r.processing = map[string]empty{}
return r
}
type myRequester struct {
cache cache
threshold chan empty
+ processing map[string]*sync.Mutex
running bool
- processing map[string]empty
+ requests sync.WaitGroup
sync.Mutex
}
func (mr *myRequester) AddRequest(request Request) {
mr.Lock()
running := mr.running
mr.Unlock()
if running {
id := request.ID()
- mr.waitToProcessRequest(id)
+ mr.Lock()
+ if _, ok := mr.processing[id]; !ok {
+ mr.processing[id] = new(sync.Mutex)
+ }
+ mr.processing[id].Lock()
+ mr.Unlock()
+
+ mr.requests.Add(1)
+ defer mr.requests.Done()
+
if v, ok := mr.cache.get(id); ok {
request.SetResult(v.val(), v.err())
} else {
mr.threshold <- empty{}
val, err := request.Run()
<-mr.threshold
if request.Cacheable() {
mr.cache.cache(id, newCacheable(val, err))
}
}
mr.Lock()
- delete(mr.processing, id)
+ mr.processing[id].Unlock()
mr.Unlock()
}
}
-func (mr *myRequester) waitToProcessRequest(id string) {
- for ok := true; ok; {
- mr.Lock()
- if _, ok = mr.processing[id]; !ok {
- mr.processing[id] = empty{}
- }
- mr.Unlock()
- }
-}
-
func (mr *myRequester) Stop() {
mr.Lock()
mr.running = false
mr.Unlock()
+ mr.requests.Wait()
}
type cache interface {
get(id string) (cacheable, bool)
cache(id string, c cacheable)
}
func newCache(cacheSize int) cache {
buf := new(buffer)
buf.indexes = map[string]int{}
buf.ids = map[int]string{}
buf.buffered = make([]bufferable, cacheSize, cacheSize)
buf.last = 0
buf.size = cacheSize
return buf
}
type buffer struct {
indexes map[string]int
ids map[int]string
buffered []bufferable
last int
size int
sync.Mutex
}
func (b *buffer) get(id string) (cacheable, bool) {
b.Lock()
defer b.Unlock()
if i, ok := b.indexes[id]; ok {
val := b.buffered[i]
return &val, true
}
return nil, false
}
func (b *buffer) cache(id string, c cacheable) {
b.Lock()
defer b.Unlock()
oldid := b.ids[b.last]
delete(b.ids, b.last)
delete(b.indexes, oldid)
b.ids[b.last] = id
b.indexes[id] = b.last
b.buffered[b.last] = bufferable{c.val(), c.err()}
b.last++
b.last %= b.size
}
type cacheable interface {
val() interface{}
err() error
}
func newCacheable(val interface{}, err error) cacheable {
return &bufferable{val, err}
}
type bufferable struct {
value interface{}
error error
}
func (b *bufferable) val() interface{} {
return b.value
}
func (b *bufferable) err() error {
return b.error
}