Add configurable resource limits (CPU, memory, heap, goroutines) and bounded concurrency for EVENT and REQ handling

This commit is contained in:
0ceanSlim 2024-08-19 13:47:23 -04:00
parent 56a725c5df
commit 2b071fb7d8
7 changed files with 241 additions and 106 deletions

View File

@ -10,6 +10,12 @@ server:
max_connections: 100 max_connections: 100
max_subscriptions_per_client: 10 max_subscriptions_per_client: 10
resource_limits:
cpu_cores: 2 # Limit the number of CPU cores the application can use
memory_mb: 1024 # Cap the maximum amount of RAM in MB the application can use
heap_size_mb: 512 # Set a limit on the Go garbage collector's heap size in MB
max_goroutines: 100 # Limit the maximum number of concurrently running Go routines
rate_limit: rate_limit:
ws_limit: 100 # WebSocket messages per second ws_limit: 100 # WebSocket messages per second
ws_burst: 200 # Allowed burst of WebSocket messages ws_burst: 200 # Allowed burst of WebSocket messages

View File

@ -0,0 +1,8 @@
package config
// ResourceLimits holds the process-wide resource caps read from the
// "resource_limits" section of config.yml. A zero value disables the
// corresponding limit.
type ResourceLimits struct {
	CPUCores      int `yaml:"cpu_cores"`      // number of CPU cores the app may use (runtime.GOMAXPROCS)
	MemoryMB      int `yaml:"memory_mb"`      // cap on RAM usage in MB, enforced by a monitor goroutine
	HeapSizeMB    int `yaml:"heap_size_mb"`   // limit on the Go GC heap size in MB (debug.SetMemoryLimit)
	MaxGoroutines int `yaml:"max_goroutines"` // max number of concurrently running limited goroutines
}

View File

@ -18,4 +18,5 @@ type ServerConfig struct {
KindWhitelist KindWhitelistConfig `yaml:"kind_whitelist"` KindWhitelist KindWhitelistConfig `yaml:"kind_whitelist"`
DomainWhitelist DomainWhitelistConfig `yaml:"domain_whitelist"` DomainWhitelist DomainWhitelistConfig `yaml:"domain_whitelist"`
Blacklist BlacklistConfig `yaml:"blacklist"` Blacklist BlacklistConfig `yaml:"blacklist"`
ResourceLimits ResourceLimits `yaml:"resource_limits"`
} }

26
main.go
View File

@ -25,19 +25,22 @@ func main() {
utils.EnsureFileExists("relay_metadata.json", "app/static/examples/relay_metadata.example.json") utils.EnsureFileExists("relay_metadata.json", "app/static/examples/relay_metadata.example.json")
restartChan := make(chan struct{}) restartChan := make(chan struct{})
go utils.WatchConfigFile("config.yml", restartChan) go utils.WatchConfigFile("config.yml", restartChan) // Critical goroutine
signalChan := make(chan os.Signal, 1) signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
var wg sync.WaitGroup var wg sync.WaitGroup
for { for {
wg.Add(1) wg.Add(1) // Add to WaitGroup for the server goroutine
cfg, err := config.LoadConfig("config.yml") cfg, err := config.LoadConfig("config.yml")
if err != nil { if err != nil {
log.Fatal("Error loading config: ", err) log.Fatal("Error loading config: ", err)
} }
utils.ApplyResourceLimits(&cfg.ResourceLimits) // Apply limits once before starting the server
client, err := db.InitDB(cfg) client, err := db.InitDB(cfg)
if err != nil { if err != nil {
log.Fatal("Error initializing database: ", err) log.Fatal("Error initializing database: ", err)
@ -54,22 +57,22 @@ func main() {
} }
mux := setupRoutes() mux := setupRoutes()
// Start the server
server := startServer(cfg, mux, &wg) server := startServer(cfg, mux, &wg)
select { select {
case <-restartChan: case <-restartChan:
log.Println("Restarting server...") log.Println("Restarting server...")
server.Close() // Stop the current server instance
// Close server before restart wg.Wait() // Wait for the server goroutine to finish
server.Close()
wg.Wait()
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
case <-signalChan: case <-signalChan:
log.Println("Shutting down server...") log.Println("Shutting down server...")
server.Close() server.Close() // Stop the server
db.DisconnectDB(client) db.DisconnectDB(client) // Disconnect from MongoDB
wg.Wait() wg.Wait() // Wait for all goroutines to finish
return return
} }
} }
@ -97,13 +100,14 @@ func startServer(config *configTypes.ServerConfig, mux *http.ServeMux, wg *sync.
} }
go func() { go func() {
defer wg.Done() // Notify that the server is done shutting down defer wg.Done() // Notify that the server goroutine is done
fmt.Printf("Server is running on http://localhost%s\n", config.Server.Port) fmt.Printf("Server is running on http://localhost%s\n", config.Server.Port)
err := server.ListenAndServe() err := server.ListenAndServe()
if err != nil && err != http.ErrServerClosed { if err != nil && err != http.ErrServerClosed {
fmt.Println("Error starting server:", err) fmt.Println("Error starting server:", err)
} }
}() }()
return server return server
} }

View File

@ -16,6 +16,7 @@ import (
) )
func HandleEvent(ws *websocket.Conn, message []interface{}) { func HandleEvent(ws *websocket.Conn, message []interface{}) {
utils.LimitedGoRoutine(func() {
if len(message) != 2 { if len(message) != 2 {
fmt.Println("Invalid EVENT message format") fmt.Println("Invalid EVENT message format")
response.SendNotice(ws, "", "Invalid EVENT message format") response.SendNotice(ws, "", "Invalid EVENT message format")
@ -47,6 +48,7 @@ func HandleEvent(ws *websocket.Conn, message []interface{}) {
HandleKind(context.TODO(), evt, ws, eventSize) HandleKind(context.TODO(), evt, ws, eventSize)
fmt.Println("Event processed:", evt.ID) fmt.Println("Event processed:", evt.ID)
})
} }
func HandleKind(ctx context.Context, evt relay.Event, ws *websocket.Conn, eventSize int) { func HandleKind(ctx context.Context, evt relay.Event, ws *websocket.Conn, eventSize int) {
@ -152,4 +154,3 @@ func determineCategory(kind int) string {
return "unknown" return "unknown"
} }
} }

View File

@ -21,6 +21,7 @@ var subscriptions = make(map[string]relay.Subscription)
var mu sync.Mutex // Protect concurrent access to subscriptions map var mu sync.Mutex // Protect concurrent access to subscriptions map
func HandleReq(ws *websocket.Conn, message []interface{}, subscriptions map[string][]relay.Filter) { func HandleReq(ws *websocket.Conn, message []interface{}, subscriptions map[string][]relay.Filter) {
utils.LimitedGoRoutine(func() {
if len(message) < 3 { if len(message) < 3 {
fmt.Println("Invalid REQ message format") fmt.Println("Invalid REQ message format")
response.SendClosed(ws, "", "invalid: invalid REQ message format") response.SendClosed(ws, "", "invalid: invalid REQ message format")
@ -102,6 +103,7 @@ func HandleReq(ws *websocket.Conn, message []interface{}, subscriptions map[stri
response.SendClosed(ws, subID, "error: could not send EOSE") response.SendClosed(ws, subID, "error: could not send EOSE")
return return
} }
})
} }
// QueryEvents queries events from the MongoDB collection based on filters // QueryEvents queries events from the MongoDB collection based on filters

View File

@ -0,0 +1,113 @@
package utils
import (
"log"
"runtime"
"runtime/debug"
"sync"
"time"
configTypes "grain/config/types"
)
var (
	// maxGoroutinesChan is a counting semaphore bounding concurrent
	// limited goroutines; nil when no limit is configured.
	maxGoroutinesChan chan struct{}
	// wg tracks every goroutine started via LimitedGoRoutine so
	// WaitForGoroutines can block until all have finished.
	wg sync.WaitGroup
	// goroutineQueue holds tasks waiting for a free semaphore slot.
	goroutineQueue []func()
	// goroutineQueueMutex guards goroutineQueue.
	goroutineQueueMutex sync.Mutex
)
// ApplyResourceLimits applies the configured CPU, heap, memory, and
// goroutine limits to the running process. A zero (unset) or negative
// value leaves the corresponding limit disabled.
func ApplyResourceLimits(cfg *configTypes.ResourceLimits) {
	// Limit the number of CPU cores. The original called GOMAXPROCS
	// unconditionally; GOMAXPROCS(n) with n < 1 only queries the current
	// value, so an unset config field silently did nothing — guard it and
	// log like the other limits for consistency.
	if cfg.CPUCores > 0 {
		runtime.GOMAXPROCS(cfg.CPUCores)
		log.Printf("CPU cores limited to %d\n", cfg.CPUCores)
	}

	// Soft memory limit for the Go runtime; the GC works harder as total
	// usage approaches this value.
	if cfg.HeapSizeMB > 0 {
		debug.SetMemoryLimit(int64(cfg.HeapSizeMB) * 1024 * 1024)
		log.Printf("Heap size limited to %d MB\n", cfg.HeapSizeMB)
	}

	// Background watchdog that reacts when heap usage crosses the cap.
	if cfg.MemoryMB > 0 {
		go monitorMemoryUsage(cfg.MemoryMB)
		log.Printf("Max memory usage limited to %d MB\n", cfg.MemoryMB)
	}

	// Semaphore bounding concurrently running limited goroutines.
	// Left nil when unlimited; LimitedGoRoutine must handle the nil case.
	if cfg.MaxGoroutines > 0 {
		maxGoroutinesChan = make(chan struct{}, cfg.MaxGoroutines)
		log.Printf("Max goroutines limited to %d\n", cfg.MaxGoroutines)
	}
}
// LimitedGoRoutine starts a goroutine with limit enforcement
func LimitedGoRoutine(f func()) {
// By default, all routines are considered critical
goroutineQueueMutex.Lock()
goroutineQueue = append(goroutineQueue, f)
goroutineQueueMutex.Unlock()
attemptToStartGoroutine()
}
// attemptToStartGoroutine starts the oldest queued task if a concurrency
// slot is free. Safe to call from any goroutine; it acquires the queue
// lock itself.
func attemptToStartGoroutine() {
	goroutineQueueMutex.Lock()
	defer goroutineQueueMutex.Unlock()
	startNextLocked()
}

// startNextLocked dequeues and runs the head of the queue if a semaphore
// slot is available, or drops it when the semaphore is full. The caller
// MUST hold goroutineQueueMutex.
//
// BUG FIX: the original default branch called
// dropOldestNonCriticalGoroutine() while still holding
// goroutineQueueMutex (via defer); that function immediately re-locks
// the same non-reentrant mutex, deadlocking the scheduler the first time
// the semaphore filled up. The drop now happens inline under the lock
// that is already held.
func startNextLocked() {
	if len(goroutineQueue) == 0 {
		return
	}
	select {
	case maxGoroutinesChan <- struct{}{}: // acquired a slot
		f := goroutineQueue[0]
		goroutineQueue = goroutineQueue[1:]
		wg.Add(1)
		go func() {
			defer func() {
				wg.Done()
				<-maxGoroutinesChan // release the slot
				attemptToStartGoroutine()
			}()
			f()
		}()
	default:
		// Semaphore full: shed load by dropping the oldest queued task.
		log.Println("Dropping the oldest non-critical goroutine to free resources.")
		goroutineQueue = goroutineQueue[1:]
	}
}

// dropOldestNonCriticalGoroutine removes the oldest queued task to
// relieve resource pressure, then tries to start whatever is next.
//
// BUG FIX: the original called attemptToStartGoroutine() while holding
// goroutineQueueMutex, re-locking the same mutex — a guaranteed
// deadlock. It now delegates to the lock-held helper instead.
func dropOldestNonCriticalGoroutine() {
	goroutineQueueMutex.Lock()
	defer goroutineQueueMutex.Unlock()
	if len(goroutineQueue) > 0 {
		log.Println("Dropping the oldest non-critical goroutine to free resources.")
		goroutineQueue = goroutineQueue[1:]
		startNextLocked()
	}
}
// WaitForGoroutines blocks until every goroutine started through
// LimitedGoRoutine has finished. Intended for graceful shutdown.
func WaitForGoroutines() {
	wg.Wait()
}
// monitorMemoryUsage polls the runtime's allocated-heap figure once per
// second and, when it exceeds maxMemoryMB, returns memory to the OS and
// sheds queued work if pressure persists.
//
// NOTE(review): there is no stop signal, so this goroutine lives for the
// whole process — it is started at most once per ApplyResourceLimits
// call; confirm restarts don't stack extra monitors.
func monitorMemoryUsage(maxMemoryMB int) {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for range ticker.C {
		var memStats runtime.MemStats
		runtime.ReadMemStats(&memStats)
		usedMemoryMB := int(memStats.Alloc / 1024 / 1024)
		if usedMemoryMB <= maxMemoryMB {
			continue
		}

		log.Printf("Memory usage exceeded limit: %d MB used, limit is %d MB\n", usedMemoryMB, maxMemoryMB)
		debug.FreeOSMemory() // force a GC and return freed spans to the OS

		// BUG FIX: the original re-checked the stale usedMemoryMB value,
		// which is by construction still over the limit, so queued work was
		// dropped even when FreeOSMemory had already relieved the pressure.
		// Re-measure before deciding to shed work.
		runtime.ReadMemStats(&memStats)
		if int(memStats.Alloc/1024/1024) > maxMemoryMB {
			dropOldestNonCriticalGoroutine()
		}
	}
}