Решение на HTTP сваляч от Катя Спасова

Обратно към всички решения

Към профила на Катя Спасова

Резултати

  • 9 точки от тестове
  • 0 бонус точки
  • 9 точки общо
  • 12 успешни тест(а)
  • 1 неуспешни тест(а)

Код

package main
import (
"sync"
)
// Request is a single unit of work identified by an id.
type Request interface {
	// ID returns the id of the Request. Two equal Requests have the same id.
	ID() string

	// Run blocks while it executes the Request and returns the result of
	// the execution or an error. The result and the error should not be
	// passed to SetResult of the current request - they are stored
	// internally before they are returned.
	Run() (result interface{}, err error)

	// Cacheable reports whether the Request's result may be cached.
	// Behaviour is undefined if it is called before Run.
	Cacheable() bool

	// SetResult sets the result of the Request. It must not be called for
	// a Request on which Run has been called.
	SetResult(result interface{}, err error)
}

// Requester queues Requests, executes them with throttling and caches
// their results.
type Requester interface {
	// AddRequest adds a request and executes it, if necessary, at the
	// first possible time.
	AddRequest(request Request)

	// Stop stops the Requester. It waits for all started Requests and
	// calls SetResult for all queued requests for which a Request with
	// the same id has been executed. No new Requests may be added after
	// this point and no Requests remain queued for SetResult.
	Stop()
}

// NewRequester returns a new Requester which caches the responses of up
// to cacheSize Requests and executes no more than throttleSize Requests
// at the same time.
func NewRequester(cacheSize int, throttleSize int) Requester {
	requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
	requester.init()
	go requester.start()
	return requester
}

// MyRequester is the implementation of the Requester interface.
type MyRequester struct {
	cacheSize     int                        // maximum number of cached results
	throttleSize  int                        // maximum number of concurrently running Requests
	running       bool                       // becomes false once Stop has been called
	mutex         sync.Mutex                 // guards all fields below
	queue         []Request                  // requests waiting to be dispatched
	cache         map[string]ExecutionResult // results of cacheable requests, keyed by id
	cachedIds     []string                   // cache insertion order, used for FIFO eviction
	executionPool map[string]*Request        // requests currently being executed, keyed by id
	cond          *sync.Cond                 // wakes the scheduler goroutine; cond.L is &mutex
	finishCond    chan struct{}              // closed when the scheduler has fully shut down
}

// init initialises all fields of MyRequester.
func (requester *MyRequester) init() {
	requester.running = true
	requester.queue = make([]Request, 0)
	requester.cache = make(map[string]ExecutionResult)
	requester.cachedIds = make([]string, 0)
	requester.executionPool = make(map[string]*Request)
	// The condition variable shares the requester's own mutex so the
	// scheduler can check the queue state and go to sleep atomically.
	// (The previous design used a separate, permanently locked mutex;
	// a Signal arriving between releasing the requester mutex and the
	// call to Wait was lost, deadlocking start - and therefore Stop.)
	requester.cond = sync.NewCond(&requester.mutex)
	requester.finishCond = make(chan struct{})
}

// Lock locks the requester.
func (requester *MyRequester) Lock() {
	requester.mutex.Lock()
}

// Unlock unlocks the requester.
func (requester *MyRequester) Unlock() {
	requester.mutex.Unlock()
}

// AddRequest adds a Request for execution. It will be executed, if
// necessary, at the first possible time. Requests added after Stop are
// silently discarded.
func (requester *MyRequester) AddRequest(request Request) {
	requester.Lock()
	defer requester.Unlock()
	if requester.running {
		requester.queue = append(requester.queue, request)
		requester.cond.Signal()
	}
}

// hasNoRequests reports whether there is neither queued nor currently
// executing work. The caller must hold the requester mutex.
func (requester *MyRequester) hasNoRequests() bool {
	return len(requester.queue) == 0 && len(requester.executionPool) == 0
}

// Stop stops MyRequester. Already started Requests are waited for;
// queued-but-unstarted ones are dropped by the scheduler.
func (requester *MyRequester) Stop() {
	requester.Lock()
	requester.running = false
	// Wake the scheduler so it can observe the stop.
	requester.cond.Signal()
	if requester.hasNoRequests() {
		requester.Unlock()
		return
	}
	requester.Unlock()
	// Wait until the scheduler has drained everything and shut down.
	<-requester.finishCond
}

// start is the scheduler loop: it waits for Requests and executes them,
// or serves them from the cache. It runs in its own goroutine.
func (requester *MyRequester) start() {
	for {
		requester.Lock()
		// Sleep while there is nothing to dispatch: the queue is empty
		// and either we are still running, or we are stopped but must
		// wait for in-flight requests to finish. Wait atomically
		// releases the mutex, so no Signal can be lost.
		for len(requester.queue) == 0 && (requester.running || len(requester.executionPool) > 0) {
			requester.cond.Wait()
		}
		if !requester.running && requester.hasNoRequests() {
			requester.Unlock()
			// Release everyone blocked in Stop.
			close(requester.finishCond)
			return
		}
		requester.Unlock()
		requester.executeRequest()
	}
}

// executeRequest finds the first runnable queued request and dispatches
// it: served from the cache, dropped (when stopped), or started in a new
// goroutine subject to the throttle limit.
func (requester *MyRequester) executeRequest() {
	requester.Lock()
	defer requester.Unlock()
	for i := 0; i < len(requester.queue); i++ {
		request := requester.queue[i]
		id := request.ID()
		// Served from the cache - no execution needed.
		if executionResult, ok := requester.cache[id]; ok {
			request.SetResult(executionResult.result, executionResult.err)
			requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
			break
		}
		// A request with the same id is running right now - it may fill
		// the cache, so leave this one queued for a later pass.
		if _, executedNow := requester.executionPool[id]; executedNow {
			continue
		}
		// Not cached and not running: drop it if the requester is stopped.
		if !requester.running {
			requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
			break
		}
		// The requester is running - start the request, respecting the
		// throttle limit.
		if len(requester.executionPool) < requester.throttleSize {
			requester.executionPool[id] = &request
			requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
			go requester.doExecute(request)
			break
		}
	}
}

// doExecute runs a single request, caches its result if it is cacheable,
// removes it from the execution pool and wakes the scheduler.
func (requester *MyRequester) doExecute(request Request) {
	result, err := request.Run()
	if request.Cacheable() {
		requester.addToCache(request.ID(), result, err)
	}
	requester.Lock()
	defer requester.Unlock()
	delete(requester.executionPool, request.ID())
	// A pool slot has freed up, and shutdown may now be able to
	// complete - wake the scheduler in either case.
	requester.cond.Signal()
}

// addToCache stores the result of a Request's execution, evicting the
// oldest cached entry when the cache is full (FIFO order).
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
	requester.Lock()
	defer requester.Unlock()
	// A non-positive capacity means nothing can be cached; the previous
	// version panicked here on cachedIds[0] when cacheSize was 0.
	if requester.cacheSize <= 0 {
		return
	}
	if len(requester.cachedIds) >= requester.cacheSize {
		removeId := requester.cachedIds[0]
		requester.cachedIds = requester.cachedIds[1:]
		delete(requester.cache, removeId)
	}
	requester.cachedIds = append(requester.cachedIds, id)
	requester.cache[id] = ExecutionResult{result: result, err: err}
}

// ExecutionResult holds the outcome of a single Request execution.
type ExecutionResult struct {
	result interface{} // value returned by Run
	err    error       // error returned by Run
}

Лог от изпълнението

PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.003s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.003s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.104s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.003s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.003s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.043s
panic: test timed out after 1s

goroutine 9 [running]:
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:703 +0x132
created by time.goFunc
	/usr/local/go/src/time/sleep.go:129 +0x3a

goroutine 1 [chan receive]:
testing.RunTests(0x5e2d50, 0x670e20, 0xd, 0xd, 0x1)
	/usr/local/go/src/testing/testing.go:562 +0x8ad
testing.(*M).Run(0xc82003fef8, 0xc820010650)
	/usr/local/go/src/testing/testing.go:494 +0x70
main.main()
	_/tmp/d20160101-5892-1wxnl42/_test/_testmain.go:78 +0x116

goroutine 6 [chan receive]:
_/tmp/d20160101-5892-1wxnl42.TestCacheSize(0xc82008c000)
	/tmp/d20160101-5892-1wxnl42/solution_test.go:299 +0x982
testing.tRunner(0xc82008c000, 0x670eb0)
	/usr/local/go/src/testing/testing.go:456 +0x98
created by testing.RunTests
	/usr/local/go/src/testing/testing.go:561 +0x86d

goroutine 7 [semacquire]:
sync.runtime_Syncsemacquire(0xc820016750)
	/usr/local/go/src/runtime/sema.go:237 +0x201
sync.(*Cond).Wait(0xc820016740)
	/usr/local/go/src/sync/cond.go:62 +0x9b
_/tmp/d20160101-5892-1wxnl42.(*MyRequester).start(0xc820012380)
	/tmp/d20160101-5892-1wxnl42/solution.go:125 +0xa0
created by _/tmp/d20160101-5892-1wxnl42.NewRequester
	/tmp/d20160101-5892-1wxnl42/solution.go:42 +0x91
exit status 2
FAIL	_/tmp/d20160101-5892-1wxnl42	1.006s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.045s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.054s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.217s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.034s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.053s
PASS
ok  	_/tmp/d20160101-5892-1wxnl42	0.114s

История (7 версии и 4 коментара)

Катя обнови решението на 28.12.2015 18:05 (преди над 2 години)

+package main
+
+import (
+ "sync"
+)
+
+type Request interface {
+ // return the id if the Request. If two Requests are the same, they have the same id
+ ID() string
+
+ // Blocks while it executes the Request
+ // Returns the result from the execution or an error
+ // The result and the error shoukd not be passed to SetResult
+ // of the current request - They are stored internally before they are returned
+ Run() (result interface{}, err error)
+
+ // Returns a flag if the Request is cacheble
+ // Has an unidentified behaviour id the called before `Run`
+ Cacheable() bool
+
+ // Sets the result of the Request
+ // Should not be called for Request for which `Run` has been called.
+ SetResult(result interface{}, err error)
+}
+
+type Requester interface {
+ // Adds request and executes it if this us necessary at the first possible time
+ AddRequest(request Request)
+
+ // Stops the Requester. Waits all started Requests and call `SetResult` for all requests
+ // that for which a same type Request has been executed
+ // No new Request should be added at this time. No Requsts should be queued for calling
+ // of `SetResult`
+ Stop()
+}
+
+// Returns a new Requester, which cashes the responses of cacheSize Requests
+// and executed no more than throttleSize Requests at the same time
+func NewRequester(cacheSize int, throttleSize int) Requester {
+ requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
+ requester.init()
+ go requester.start()
+
+ return requester
+}
+
+// Implemenation of Requester interface
+type MyRequester struct {
+ cacheSize int
+ throttleSize int
+ running bool
+ mutex sync.Mutex
+ queue []Request
+ cache map[string]ExecutionResult
+ cachedIds []string
+ executionPool map[string]*Request
+ cond *sync.Cond
+}
+
+// Initialises all fields of MyRequester
+func (requester *MyRequester) init() {
+ requester.running = true
+ requester.mutex = sync.Mutex{}
+ requester.queue = make([]Request, 0)
+ requester.cache = make(map[string]ExecutionResult, 0)
+ requester.cachedIds = make([]string, 0)
+ requester.executionPool = make(map[string]*Request)
+ condMutex := sync.Mutex{}
+ condMutex.Lock()
+ requester.cond = sync.NewCond(&condMutex)
+}
+
+// Adds a Request for execution. It will be executed if necessary at the first possible time
+func (requester *MyRequester) AddRequest(request Request) {
+ if requester.running {
+ requester.mutex.Lock()
+ defer func() {
+ requester.mutex.Unlock()
+ }()
+ requester.queue = append(requester.queue, request)
+ requester.cond.Signal()
+ }
+}
+
+// Stops MyRequester. All pending requests will be executed
+func (requester *MyRequester) Stop() {
+ requester.mutex.Lock()
+ defer func() {
+ requester.mutex.Unlock()
+ }()
+ if requester.running {
+ requester.running = false
+ }
+ requester.cond.Signal()
+}
+
+// Waits for Requests and executes them or takes the result from the cache
+func (requester *MyRequester) start() {
+ for {
+ requester.mutex.Lock()
+ if !requester.running && len(requester.queue) == 0 {
+ requester.mutex.Unlock()
+ break
+ } else if len(requester.queue) == 0 {
+ requester.mutex.Unlock()
+ requester.cond.Wait()
+ } else {
+ requester.mutex.Unlock()
+ }
+
+ requester.executeRequest()
+ }
+}
+
+// finds the first available request and executes it
+func (requester *MyRequester) executeRequest() {
+ requester.mutex.Lock()
+ defer func() {
+ requester.mutex.Unlock()
+ }()
+ for i := 0; i < len(requester.queue); i++ {
+ request := requester.queue[i]
+ id := request.ID()
+ // check if it is cached
+ executionResult, ok := requester.cache[id]
+ if ok {
+ request.SetResult(executionResult.result, executionResult.err)
+ requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
+ break
+ }
+
+ // check if request of the same type is executed right now
+ _, executedNow := requester.executionPool[id]
+ if executedNow {
+ continue
+ }
+
+ // add the request to the execution pool if possible
+ if len(requester.executionPool) < requester.throttleSize {
+ requester.executionPool[id] = &request
+ requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
+ go requester.doExecute(request)
+ break
+ }
+ }
+}
+
+func (requester *MyRequester) doExecute(request Request) {
+ result, err := request.Run()
+ if request.Cacheable() {
+ requester.addToCache(request.ID(), result, err)
+ }
+ requester.mutex.Lock()
+ // remove the request from the execution pool
+ delete(requester.executionPool, request.ID())
+ requester.mutex.Unlock()
+}
+
+// Adds the result of Request's execution to the cache
+func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
+ executionResult := ExecutionResult{result: result, err: err}
+ requester.mutex.Lock()
+ defer func() {
+ requester.mutex.Unlock()
+ }()
+ if len(requester.cachedIds) == requester.cacheSize {
+ removeId := requester.cachedIds[0]
+ requester.cachedIds = requester.cachedIds[1:]
+ delete(requester.cache, removeId)
+ }
+ requester.cachedIds = append(requester.cachedIds, id)
+ requester.cache[id] = executionResult
+}
+
+type ExecutionResult struct {
+ result interface{}
+ err error
+}

Добър вечер,

на бързо и на градус, мога да кажа че:

  1. defer приема извикване на функция така че може и само defer requester.mutex.Unlock().
  2. Принципно ако ще се локва целия обект както изглежда е при теб е прието просто да ембеднеш sync.Mutex-a и да е requester.Lock(). Не че е грешно иначе, просто е по go-шки (според мен) с ембеднат mutex.
  3. Решението е интересно (и навярно не лошo) използване на sync.Cond, което изглежда да работи. Нямам спомен да съм го ползвал (sync.Cond) но винаги съм си мислел че в някакви такива ситуации би бил полезен. Браво.
  4. Явно не съм обяснил, като хората, че Stop изчаква пълното и тотално спиране на Requester-а. Тоест изчаква все пак да завършат всички започнати заявки.
  5. Прочети отново условието за Stop, главно в частта кои заявки биват завършвани, започвани и довършвани и кои не :D

Лека вечер, приятно кодене и Приятна Нова Година.

Катя обнови решението на 29.12.2015 09:22 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// return the id if the Request. If two Requests are the same, they have the same id
ID() string
// Blocks while it executes the Request
// Returns the result from the execution or an error
// The result and the error shoukd not be passed to SetResult
// of the current request - They are stored internally before they are returned
Run() (result interface{}, err error)
// Returns a flag if the Request is cacheble
// Has an unidentified behaviour id the called before `Run`
Cacheable() bool
// Sets the result of the Request
// Should not be called for Request for which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// Adds request and executes it if this us necessary at the first possible time
AddRequest(request Request)
// Stops the Requester. Waits all started Requests and call `SetResult` for all requests
// that for which a same type Request has been executed
// No new Request should be added at this time. No Requsts should be queued for calling
// of `SetResult`
Stop()
}
// Returns a new Requester, which cashes the responses of cacheSize Requests
// and executed no more than throttleSize Requests at the same time
func NewRequester(cacheSize int, throttleSize int) Requester {
requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
requester.init()
go requester.start()
return requester
}
// Implemenation of Requester interface
type MyRequester struct {
cacheSize int
throttleSize int
running bool
mutex sync.Mutex
queue []Request
cache map[string]ExecutionResult
cachedIds []string
executionPool map[string]*Request
cond *sync.Cond
+ finishCond *sync.Cond
}
// Initialises all fields of MyRequester
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
+ finishMutex := sync.Mutex{}
+ finishMutex.Lock()
+ requester.finishCond = sync.NewCond(&finishMutex)
}
+// Locks the requester
+func (requester *MyRequester) Lock() {
+ requester.mutex.Lock()
+}
+
+// Unlocks the requester
+func (requester *MyRequester) Unlock() {
+ requester.mutex.Unlock()
+}
+
// Adds a Request for execution. It will be executed if necessary at the first possible time
func (requester *MyRequester) AddRequest(request Request) {
+ requester.Lock()
+ defer requester.Unlock()
if requester.running {
- requester.mutex.Lock()
- defer func() {
- requester.mutex.Unlock()
- }()
requester.queue = append(requester.queue, request)
requester.cond.Signal()
}
}
+func (requester *MyRequester) hasNoRequests() bool {
+ return len(requester.queue) == 0 && len(requester.executionPool) == 0
+}
+
// Stops MyRequester. All pending requests will be executed
func (requester *MyRequester) Stop() {
- requester.mutex.Lock()
- defer func() {
- requester.mutex.Unlock()
- }()
+ requester.Lock()
if requester.running {
requester.running = false
}
requester.cond.Signal()
+ hasNoRequests := requester.hasNoRequests()
+ if !hasNoRequests {
+ requester.Unlock()
+
+ // wait to finish the started requests
+ requester.finishCond.Wait()
+ } else {
+ requester.Unlock()
+ }
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
- requester.mutex.Lock()
- if !requester.running && len(requester.queue) == 0 {
- requester.mutex.Unlock()
+ requester.Lock()
+ hasNoRequests := requester.hasNoRequests()
+ if !requester.running && hasNoRequests {
+ requester.Unlock()
+ requester.finishCond.Signal()
break
- } else if len(requester.queue) == 0 {
- requester.mutex.Unlock()
+ } else if hasNoRequests {
+ requester.Unlock()
requester.cond.Wait()
} else {
- requester.mutex.Unlock()
+ requester.Unlock()
}
requester.executeRequest()
}
}
// finds the first available request and executes it
func (requester *MyRequester) executeRequest() {
- requester.mutex.Lock()
- defer func() {
- requester.mutex.Unlock()
- }()
+ requester.Lock()
+ defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if request of the same type is executed right now
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
// add the request to the execution pool if possible
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
- requester.mutex.Lock()
+ requester.Lock()
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
- requester.mutex.Unlock()
+ requester.Unlock()
}
// Adds the result of Request's execution to the cache
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
executionResult := ExecutionResult{result: result, err: err}
- requester.mutex.Lock()
- defer func() {
- requester.mutex.Unlock()
- }()
+ requester.Lock()
+ defer requester.Unlock()
if len(requester.cachedIds) == requester.cacheSize {
removeId := requester.cachedIds[0]
requester.cachedIds = requester.cachedIds[1:]
delete(requester.cache, removeId)
}
requester.cachedIds = append(requester.cachedIds, id)
requester.cache[id] = executionResult
}
type ExecutionResult struct {
result interface{}
err error
}

Катя обнови решението на 29.12.2015 10:00 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// return the id if the Request. If two Requests are the same, they have the same id
ID() string
// Blocks while it executes the Request
// Returns the result from the execution or an error
// The result and the error shoukd not be passed to SetResult
// of the current request - They are stored internally before they are returned
Run() (result interface{}, err error)
// Returns a flag if the Request is cacheble
// Has an unidentified behaviour id the called before `Run`
Cacheable() bool
// Sets the result of the Request
// Should not be called for Request for which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// Adds request and executes it if this us necessary at the first possible time
AddRequest(request Request)
// Stops the Requester. Waits all started Requests and call `SetResult` for all requests
// that for which a same type Request has been executed
// No new Request should be added at this time. No Requsts should be queued for calling
// of `SetResult`
Stop()
}
// Returns a new Requester, which cashes the responses of cacheSize Requests
// and executed no more than throttleSize Requests at the same time
func NewRequester(cacheSize int, throttleSize int) Requester {
requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
requester.init()
go requester.start()
return requester
}
// Implemenation of Requester interface
type MyRequester struct {
cacheSize int
throttleSize int
running bool
mutex sync.Mutex
queue []Request
cache map[string]ExecutionResult
cachedIds []string
executionPool map[string]*Request
cond *sync.Cond
- finishCond *sync.Cond
+ finishCond chan (struct{})
}
// Initialises all fields of MyRequester
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
- finishMutex := sync.Mutex{}
- finishMutex.Lock()
- requester.finishCond = sync.NewCond(&finishMutex)
+ requester.finishCond = make(chan struct{})
}
// Locks the requester
func (requester *MyRequester) Lock() {
requester.mutex.Lock()
}
// Unlocks the requester
func (requester *MyRequester) Unlock() {
requester.mutex.Unlock()
}
// Adds a Request for execution. It will be executed if necessary at the first possible time
func (requester *MyRequester) AddRequest(request Request) {
requester.Lock()
defer requester.Unlock()
if requester.running {
requester.queue = append(requester.queue, request)
requester.cond.Signal()
}
}
func (requester *MyRequester) hasNoRequests() bool {
return len(requester.queue) == 0 && len(requester.executionPool) == 0
}
// Stops MyRequester. All pending requests will be executed
func (requester *MyRequester) Stop() {
requester.Lock()
if requester.running {
requester.running = false
}
requester.cond.Signal()
- hasNoRequests := requester.hasNoRequests()
- if !hasNoRequests {
+ if !requester.hasNoRequests() {
requester.Unlock()
-
- // wait to finish the started requests
- requester.finishCond.Wait()
+ <-requester.finishCond
} else {
requester.Unlock()
}
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
requester.Lock()
hasNoRequests := requester.hasNoRequests()
if !requester.running && hasNoRequests {
requester.Unlock()
- requester.finishCond.Signal()
+ close(requester.finishCond)
break
} else if hasNoRequests {
requester.Unlock()
requester.cond.Wait()
} else {
requester.Unlock()
}
requester.executeRequest()
}
}
// finds the first available request and executes it
func (requester *MyRequester) executeRequest() {
requester.Lock()
defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if request of the same type is executed right now
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
// add the request to the execution pool if possible
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
requester.Lock()
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
requester.Unlock()
}
// Adds the result of Request's execution to the cache
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
executionResult := ExecutionResult{result: result, err: err}
requester.Lock()
defer requester.Unlock()
if len(requester.cachedIds) == requester.cacheSize {
removeId := requester.cachedIds[0]
requester.cachedIds = requester.cachedIds[1:]
delete(requester.cache, removeId)
}
requester.cachedIds = append(requester.cachedIds, id)
requester.cache[id] = executionResult
}
type ExecutionResult struct {
result interface{}
err error
}

Катя обнови решението на 29.12.2015 16:56 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// return the id if the Request. If two Requests are the same, they have the same id
ID() string
// Blocks while it executes the Request
// Returns the result from the execution or an error
// The result and the error shoukd not be passed to SetResult
// of the current request - They are stored internally before they are returned
Run() (result interface{}, err error)
// Returns a flag if the Request is cacheble
// Has an unidentified behaviour id the called before `Run`
Cacheable() bool
// Sets the result of the Request
// Should not be called for Request for which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// Adds request and executes it if this us necessary at the first possible time
AddRequest(request Request)
// Stops the Requester. Waits all started Requests and call `SetResult` for all requests
// that for which a same type Request has been executed
// No new Request should be added at this time. No Requsts should be queued for calling
// of `SetResult`
Stop()
}
// Returns a new Requester, which cashes the responses of cacheSize Requests
// and executed no more than throttleSize Requests at the same time
func NewRequester(cacheSize int, throttleSize int) Requester {
requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
requester.init()
go requester.start()
return requester
}
// Implemenation of Requester interface
type MyRequester struct {
cacheSize int
throttleSize int
running bool
mutex sync.Mutex
queue []Request
cache map[string]ExecutionResult
cachedIds []string
executionPool map[string]*Request
cond *sync.Cond
finishCond chan (struct{})
}
// Initialises all fields of MyRequester
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
requester.finishCond = make(chan struct{})
}
// Locks the requester
func (requester *MyRequester) Lock() {
requester.mutex.Lock()
}
// Unlocks the requester
func (requester *MyRequester) Unlock() {
requester.mutex.Unlock()
}
// Adds a Request for execution. It will be executed if necessary at the first possible time
func (requester *MyRequester) AddRequest(request Request) {
requester.Lock()
defer requester.Unlock()
if requester.running {
requester.queue = append(requester.queue, request)
requester.cond.Signal()
}
}
func (requester *MyRequester) hasNoRequests() bool {
return len(requester.queue) == 0 && len(requester.executionPool) == 0
}
// Stops MyRequester. All pending requests will be executed
func (requester *MyRequester) Stop() {
requester.Lock()
if requester.running {
requester.running = false
}
requester.cond.Signal()
if !requester.hasNoRequests() {
requester.Unlock()
<-requester.finishCond
} else {
requester.Unlock()
}
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
requester.Lock()
hasNoRequests := requester.hasNoRequests()
if !requester.running && hasNoRequests {
requester.Unlock()
close(requester.finishCond)
break
} else if hasNoRequests {
requester.Unlock()
requester.cond.Wait()
} else {
requester.Unlock()
}
requester.executeRequest()
}
}
// finds the first available request and executes it
func (requester *MyRequester) executeRequest() {
requester.Lock()
defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if request of the same type is executed right now
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
+ // request is not cached and is not executed right now
+ // remove the request if the requester is stopped
+ if !requester.running {
+ requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
+ break
+ }
+
+ // the requester is running - execute the request
// add the request to the execution pool if possible
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
requester.Lock()
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
requester.Unlock()
}
// Adds the result of Request's execution to the cache
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
executionResult := ExecutionResult{result: result, err: err}
requester.Lock()
defer requester.Unlock()
if len(requester.cachedIds) == requester.cacheSize {
removeId := requester.cachedIds[0]
requester.cachedIds = requester.cachedIds[1:]
delete(requester.cache, removeId)
}
requester.cachedIds = append(requester.cachedIds, id)
requester.cache[id] = executionResult
}
type ExecutionResult struct {
result interface{}
err error
}

Катя обнови решението на 30.12.2015 11:27 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// return the id if the Request. If two Requests are the same, they have the same id
ID() string
// Blocks while it executes the Request
// Returns the result from the execution or an error
// The result and the error shoukd not be passed to SetResult
// of the current request - They are stored internally before they are returned
Run() (result interface{}, err error)
// Returns a flag if the Request is cacheble
// Has an unidentified behaviour id the called before `Run`
Cacheable() bool
// Sets the result of the Request
// Should not be called for Request for which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// Adds request and executes it if this us necessary at the first possible time
AddRequest(request Request)
// Stops the Requester. Waits all started Requests and call `SetResult` for all requests
// that for which a same type Request has been executed
// No new Request should be added at this time. No Requsts should be queued for calling
// of `SetResult`
Stop()
}
// Returns a new Requester, which cashes the responses of cacheSize Requests
// and executed no more than throttleSize Requests at the same time
func NewRequester(cacheSize int, throttleSize int) Requester {
requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
requester.init()
go requester.start()
return requester
}
// Implemenation of Requester interface
type MyRequester struct {
cacheSize int
throttleSize int
running bool
mutex sync.Mutex
queue []Request
cache map[string]ExecutionResult
cachedIds []string
executionPool map[string]*Request
cond *sync.Cond
finishCond chan (struct{})
}
// Initialises all fields of MyRequester
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
requester.finishCond = make(chan struct{})
}
// Locks the requester
func (requester *MyRequester) Lock() {
requester.mutex.Lock()
}
// Unlocks the requester
func (requester *MyRequester) Unlock() {
requester.mutex.Unlock()
}
// Adds a Request for execution. It will be executed if necessary at the first possible time
func (requester *MyRequester) AddRequest(request Request) {
requester.Lock()
defer requester.Unlock()
if requester.running {
requester.queue = append(requester.queue, request)
requester.cond.Signal()
}
}
func (requester *MyRequester) hasNoRequests() bool {
return len(requester.queue) == 0 && len(requester.executionPool) == 0
}
// Stops MyRequester. All pending requests will be executed
func (requester *MyRequester) Stop() {
requester.Lock()
if requester.running {
requester.running = false
}
requester.cond.Signal()
if !requester.hasNoRequests() {
requester.Unlock()
<-requester.finishCond
} else {
requester.Unlock()
}
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
requester.Lock()
hasNoRequests := requester.hasNoRequests()
if !requester.running && hasNoRequests {
requester.Unlock()
close(requester.finishCond)
break
- } else if hasNoRequests {
+ } else if len(requester.queue) == 0 {
requester.Unlock()
requester.cond.Wait()
} else {
requester.Unlock()
}
requester.executeRequest()
}
}
// finds the first available request and executes it
func (requester *MyRequester) executeRequest() {
requester.Lock()
defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if request of the same type is executed right now
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
// request is not cached and is not executed right now
// remove the request if the requester is stopped
if !requester.running {
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// the requester is running - execute the request
// add the request to the execution pool if possible
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
requester.Lock()
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
requester.Unlock()
}
// Adds the result of Request's execution to the cache
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
executionResult := ExecutionResult{result: result, err: err}
requester.Lock()
defer requester.Unlock()
if len(requester.cachedIds) == requester.cacheSize {
removeId := requester.cachedIds[0]
requester.cachedIds = requester.cachedIds[1:]
delete(requester.cache, removeId)
}
requester.cachedIds = append(requester.cachedIds, id)
requester.cache[id] = executionResult
}
type ExecutionResult struct {
result interface{}
err error
}

Катя обнови решението на 30.12.2015 11:35 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// return the id if the Request. If two Requests are the same, they have the same id
ID() string
// Blocks while it executes the Request
// Returns the result from the execution or an error
// The result and the error shoukd not be passed to SetResult
// of the current request - They are stored internally before they are returned
Run() (result interface{}, err error)
// Returns a flag if the Request is cacheble
// Has an unidentified behaviour id the called before `Run`
Cacheable() bool
// Sets the result of the Request
// Should not be called for Request for which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// Adds request and executes it if this us necessary at the first possible time
AddRequest(request Request)
// Stops the Requester. Waits all started Requests and call `SetResult` for all requests
// that for which a same type Request has been executed
// No new Request should be added at this time. No Requsts should be queued for calling
// of `SetResult`
Stop()
}
// Returns a new Requester, which cashes the responses of cacheSize Requests
// and executed no more than throttleSize Requests at the same time
func NewRequester(cacheSize int, throttleSize int) Requester {
requester := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
requester.init()
go requester.start()
return requester
}
// Implemenation of Requester interface
type MyRequester struct {
cacheSize int
throttleSize int
running bool
mutex sync.Mutex
queue []Request
cache map[string]ExecutionResult
cachedIds []string
executionPool map[string]*Request
cond *sync.Cond
finishCond chan (struct{})
}
// Initialises all fields of MyRequester
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
requester.finishCond = make(chan struct{})
}
// Locks the requester
func (requester *MyRequester) Lock() {
requester.mutex.Lock()
}
// Unlocks the requester
func (requester *MyRequester) Unlock() {
requester.mutex.Unlock()
}
// Adds a Request for execution. It will be executed if necessary at the first possible time
func (requester *MyRequester) AddRequest(request Request) {
requester.Lock()
defer requester.Unlock()
if requester.running {
requester.queue = append(requester.queue, request)
requester.cond.Signal()
}
}
func (requester *MyRequester) hasNoRequests() bool {
return len(requester.queue) == 0 && len(requester.executionPool) == 0
}
// Stops MyRequester. All pending requests will be executed
func (requester *MyRequester) Stop() {
requester.Lock()
if requester.running {
requester.running = false
}
requester.cond.Signal()
if !requester.hasNoRequests() {
requester.Unlock()
<-requester.finishCond
} else {
requester.Unlock()
}
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
requester.Lock()
hasNoRequests := requester.hasNoRequests()
if !requester.running && hasNoRequests {
requester.Unlock()
close(requester.finishCond)
break
- } else if len(requester.queue) == 0 {
+ } else if requester.running && len(requester.queue) == 0 {
requester.Unlock()
requester.cond.Wait()
} else {
requester.Unlock()
}
requester.executeRequest()
}
}
// finds the first available request and executes it
func (requester *MyRequester) executeRequest() {
requester.Lock()
defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if request of the same type is executed right now
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
// request is not cached and is not executed right now
// remove the request if the requester is stopped
if !requester.running {
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// the requester is running - execute the request
// add the request to the execution pool if possible
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
requester.Lock()
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
requester.Unlock()
}
// Adds the result of Request's execution to the cache
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
executionResult := ExecutionResult{result: result, err: err}
requester.Lock()
defer requester.Unlock()
if len(requester.cachedIds) == requester.cacheSize {
removeId := requester.cachedIds[0]
requester.cachedIds = requester.cachedIds[1:]
delete(requester.cache, removeId)
}
requester.cachedIds = append(requester.cachedIds, id)
requester.cache[id] = executionResult
}
type ExecutionResult struct {
result interface{}
err error
}

Катя обнови решението на 30.12.2015 12:13 (преди над 2 години)

package main
import (
"sync"
)
type Request interface {
// ID returns the id of the Request. If two Requests are the same,
// they have the same id.
ID() string
// Run blocks while it executes the Request and returns the result of the
// execution or an error. The returned values should not be passed to
// SetResult of the current request - they are stored internally before
// they are returned.
Run() (result interface{}, err error)
// Cacheable reports whether the Request's result may be cached.
// Behaviour is undefined if called before `Run`.
Cacheable() bool
// SetResult sets the result of the Request. It should not be called for
// a Request on which `Run` has been called.
SetResult(result interface{}, err error)
}
type Requester interface {
// AddRequest adds a request and, if execution is necessary, executes it
// at the first possible time.
AddRequest(request Request)
// Stop stops the Requester. It waits for all started Requests and calls
// `SetResult` for all queued requests for which a Request with the same
// id has already been executed. No new Request should be added after
// this call; no Requests should remain queued for `SetResult`.
Stop()
}
// NewRequester creates a Requester that caches the responses of up to
// cacheSize Requests and executes no more than throttleSize Requests at
// the same time. The processing loop is started in its own goroutine.
func NewRequester(cacheSize int, throttleSize int) Requester {
	r := &MyRequester{cacheSize: cacheSize, throttleSize: throttleSize}
	r.init()
	go r.start()
	return r
}
// Implementation of the Requester interface.
type MyRequester struct {
cacheSize int // maximum number of cached results
throttleSize int // maximum number of concurrently executing requests
running bool // false once Stop has been called
mutex sync.Mutex // guards queue, cache, cachedIds, executionPool, running
queue []Request // requests waiting to be handled
cache map[string]ExecutionResult // id -> cached execution result
cachedIds []string // cache insertion order, used for FIFO eviction
executionPool map[string]*Request // requests currently executing, keyed by id
cond *sync.Cond // wakes the start loop when work arrives or drains
finishCond chan (struct{}) // closed by start() when fully drained
}
// Initialises all fields of MyRequester.
func (requester *MyRequester) init() {
requester.running = true
requester.mutex = sync.Mutex{}
requester.queue = make([]Request, 0)
requester.cache = make(map[string]ExecutionResult, 0)
requester.cachedIds = make([]string, 0)
requester.executionPool = make(map[string]*Request)
condMutex := sync.Mutex{}
// The cond's mutex is deliberately locked here and never unlocked by init:
// cond.Wait in start() unlocks it while waiting and re-locks it on wakeup,
// so the start goroutine effectively owns it for the requester's lifetime.
// NOTE(review): Signal is called elsewhere without holding condMutex; a
// signal sent between start()'s queue-empty check and its Wait can be
// lost — confirm.
condMutex.Lock()
requester.cond = sync.NewCond(&condMutex)
requester.finishCond = make(chan struct{})
}
// Locks the requester's internal mutex, serialising access to its state.
func (requester *MyRequester) Lock() {
requester.mutex.Lock()
}
// Unlocks the requester's internal mutex.
func (requester *MyRequester) Unlock() {
requester.mutex.Unlock()
}
// Adds a Request for execution. It will be executed, if necessary, at the
// first possible time. Requests added after Stop are silently dropped.
func (requester *MyRequester) AddRequest(request Request) {
requester.Lock()
defer requester.Unlock()
if requester.running {
requester.queue = append(requester.queue, request)
// Wake the start loop. NOTE(review): the cond's own mutex is not held
// here, so this Signal can race with the queue-empty check in start()
// and be lost — confirm.
requester.cond.Signal()
}
}
// hasNoRequests reports whether there is no pending or in-flight work.
// The caller must hold the requester's mutex.
func (requester *MyRequester) hasNoRequests() bool {
	queued := len(requester.queue)
	executing := len(requester.executionPool)
	return queued == 0 && executing == 0
}
// Stops MyRequester. All pending requests will be executed. Blocks until
// the start goroutine signals, by closing finishCond, that all queued and
// in-flight requests have drained.
func (requester *MyRequester) Stop() {
requester.Lock()
if requester.running {
requester.running = false
}
// Wake the start loop so it can observe running == false.
requester.cond.Signal()
if !requester.hasNoRequests() {
requester.Unlock()
// Wait for start() to close finishCond once everything has drained.
<-requester.finishCond
} else {
requester.Unlock()
}
}
// Waits for Requests and executes them or takes the result from the cache
func (requester *MyRequester) start() {
for {
requester.Lock()
hasNoRequests := requester.hasNoRequests()
if !requester.running && hasNoRequests {
requester.Unlock()
close(requester.finishCond)
break
- } else if requester.running && len(requester.queue) == 0 {
+ } else if len(requester.queue) == 0 {
requester.Unlock()
requester.cond.Wait()
} else {
requester.Unlock()
}
requester.executeRequest()
}
}
// Finds the first available request in the queue and handles it: serves it
// from the cache, drops it (when the requester is stopped), or starts
// executing it in a new goroutine — whichever applies first. Handles at
// most one request per call; requests whose id is currently executing are
// skipped so a later request can be considered.
func (requester *MyRequester) executeRequest() {
requester.Lock()
defer requester.Unlock()
for i := 0; i < len(requester.queue); i++ {
request := requester.queue[i]
id := request.ID()
// check if it is cached; if so, deliver the cached result immediately
executionResult, ok := requester.cache[id]
if ok {
request.SetResult(executionResult.result, executionResult.err)
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// check if a request of the same id is executed right now; if so, skip
// it for now and try the next queued request
_, executedNow := requester.executionPool[id]
if executedNow {
continue
}
// request is not cached and is not executed right now
// remove the request if the requester is stopped
if !requester.running {
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
break
}
// the requester is running - execute the request
// add the request to the execution pool if there is throttle capacity
if len(requester.executionPool) < requester.throttleSize {
requester.executionPool[id] = &request
requester.queue = append(requester.queue[:i], requester.queue[i+1:]...)
go requester.doExecute(request)
break
}
// NOTE(review): when the pool is full nothing is done here and the
// caller (start) retries without waiting — confirm.
}
}
func (requester *MyRequester) doExecute(request Request) {
result, err := request.Run()
if request.Cacheable() {
requester.addToCache(request.ID(), result, err)
}
requester.Lock()
+ defer requester.Unlock()
+
// remove the request from the execution pool
delete(requester.executionPool, request.ID())
- requester.Unlock()
+ if !requester.running && requester.hasNoRequests() {
+ requester.cond.Signal()
+ }
}
// Adds the result of a Request's execution to the cache, evicting the
// oldest cached entry (FIFO order, tracked in cachedIds) when the cache
// is full.
func (requester *MyRequester) addToCache(id string, result interface{}, err error) {
	executionResult := ExecutionResult{result: result, err: err}
	requester.Lock()
	defer requester.Unlock()
	// A non-positive cacheSize means nothing can be cached; without this
	// guard the eviction below would index an empty cachedIds slice and panic.
	if requester.cacheSize <= 0 {
		return
	}
	// Evict the oldest id when full; >= (instead of ==) stays safe even if
	// the slice ever grows past cacheSize.
	if len(requester.cachedIds) >= requester.cacheSize {
		removeId := requester.cachedIds[0]
		requester.cachedIds = requester.cachedIds[1:]
		delete(requester.cache, removeId)
	}
	requester.cachedIds = append(requester.cachedIds, id)
	requester.cache[id] = executionResult
}
// ExecutionResult pairs the value and the error produced by one Run call,
// so both can be stored together as a single cache entry.
type ExecutionResult struct {
result interface{}
err error
}

Все още се въртиш безкрайно в някой случай, което ти фейлва примерния тест от форума защото отнема повече от секунда (той има 0.5 секунди спане в него но в останалите би трябвало да се побереш).