feat: backend core - auth, user, role, permission, device, webhook, monitoring, cache, repository, service, middleware, API handlers

This commit is contained in:
2026-04-02 11:19:50 +08:00
parent e59a77bc49
commit dcc1f186f8
298 changed files with 62603 additions and 0 deletions

108
internal/cache/cache_manager.go vendored Normal file
View File

@@ -0,0 +1,108 @@
package cache
import (
"context"
"time"
)
// CacheManager coordinates a two-level cache: a local in-process L1 cache
// in front of an optional distributed L2 cache.
type CacheManager struct {
	l1 *L1Cache // in-process cache; always present
	l2 L2Cache  // distributed cache; may be nil when disabled
}
// NewCacheManager wires an L1 cache and an optional L2 cache (may be nil)
// into a two-level cache manager.
func NewCacheManager(l1 *L1Cache, l2 L2Cache) *CacheManager {
	return &CacheManager{l1: l1, l2: l2}
}
// Get looks the key up in L1 first and falls back to L2 on a miss.
// A value found in L2 is written back into L1 with a short TTL so that
// subsequent reads stay local. The boolean reports whether a value was found.
func (cm *CacheManager) Get(ctx context.Context, key string) (interface{}, bool) {
	if v, hit := cm.l1.Get(key); hit {
		return v, true
	}
	if cm.l2 == nil {
		return nil, false
	}
	v, err := cm.l2.Get(ctx, key)
	if err != nil || v == nil {
		return nil, false
	}
	// Rehydrate L1 before returning the L2 value.
	cm.l1.Set(key, v, 5*time.Minute)
	return v, true
}
// Set stores the value in L1 and, when an L2 is configured, in L2 as well,
// each with its own TTL. L1 is written first, so even when the L2 write
// fails the value remains cached locally.
func (cm *CacheManager) Set(ctx context.Context, key string, value interface{}, l1TTL, l2TTL time.Duration) error {
	cm.l1.Set(key, value, l1TTL)
	if cm.l2 != nil {
		if err := cm.l2.Set(ctx, key, value, l2TTL); err != nil {
			// NOTE(review): the original comment said an L2 write failure
			// should not affect the overall flow, yet the error is returned
			// to the caller here — confirm which behavior is intended.
			return err
		}
	}
	return nil
}
// Delete removes the key from both cache levels. The L1 entry is always
// dropped; the result of the L2 delete (when an L2 exists) is returned.
func (cm *CacheManager) Delete(ctx context.Context, key string) error {
	cm.l1.Delete(key)
	if cm.l2 == nil {
		return nil
	}
	return cm.l2.Delete(ctx, key)
}
// Exists reports whether the key is present in either cache level.
// An L2 lookup error is treated as "not found".
func (cm *CacheManager) Exists(ctx context.Context, key string) bool {
	if _, hit := cm.l1.Get(key); hit {
		return true
	}
	if cm.l2 == nil {
		return false
	}
	found, err := cm.l2.Exists(ctx, key)
	return err == nil && found
}
// Clear empties both cache levels. L1 is cleared unconditionally; any
// error from clearing L2 is returned.
func (cm *CacheManager) Clear(ctx context.Context) error {
	cm.l1.Clear()
	if cm.l2 == nil {
		return nil
	}
	return cm.l2.Clear(ctx)
}
// GetL1 exposes the underlying in-process cache.
func (cm *CacheManager) GetL1() *L1Cache {
	return cm.l1
}

// GetL2 exposes the underlying distributed cache (nil when disabled).
func (cm *CacheManager) GetL2() L2Cache {
	return cm.l2
}

245
internal/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,245 @@
package cache_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/user-management-system/internal/cache"
)
// TestRedisCache_Disabled checks that every operation on a disabled
// RedisCache is a harmless no-op: no errors, no values, no existence.
func TestRedisCache_Disabled(t *testing.T) {
	ctx := context.Background()
	rc := cache.NewRedisCache(false)

	if err := rc.Set(ctx, "key", "value", time.Minute); err != nil {
		t.Errorf("disabled cache Set should not error: %v", err)
	}
	got, err := rc.Get(ctx, "key")
	if err != nil {
		t.Errorf("disabled cache Get should not error: %v", err)
	}
	if got != nil {
		t.Errorf("disabled cache Get should return nil, got: %v", got)
	}
	if err := rc.Delete(ctx, "key"); err != nil {
		t.Errorf("disabled cache Delete should not error: %v", err)
	}
	found, err := rc.Exists(ctx, "key")
	if err != nil {
		t.Errorf("disabled cache Exists should not error: %v", err)
	}
	if found {
		t.Error("disabled cache Exists should return false")
	}
	if err := rc.Clear(ctx); err != nil {
		t.Errorf("disabled cache Clear should not error: %v", err)
	}
	if err := rc.Close(); err != nil {
		t.Errorf("disabled cache Close should not error: %v", err)
	}
}
// TestL1Cache_SetGet covers a basic write followed by a read hit.
func TestL1Cache_SetGet(t *testing.T) {
	c := cache.NewL1Cache()
	c.Set("user:1", "alice", time.Minute)
	got, hit := c.Get("user:1")
	if !hit {
		t.Fatal("L1 Get: expected hit")
	}
	if got != "alice" {
		t.Errorf("L1 Get value = %v, want alice", got)
	}
}
// TestL1Cache_Expiration verifies that entries become invisible once
// their TTL has elapsed.
func TestL1Cache_Expiration(t *testing.T) {
	c := cache.NewL1Cache()
	c.Set("expire:1", "v", 50*time.Millisecond)
	time.Sleep(100 * time.Millisecond)
	if _, hit := c.Get("expire:1"); hit {
		t.Error("L1 key should have expired")
	}
}
// TestL1Cache_Delete verifies Delete removes an entry immediately.
func TestL1Cache_Delete(t *testing.T) {
	c := cache.NewL1Cache()
	c.Set("del:1", "v", time.Minute)
	c.Delete("del:1")
	if _, hit := c.Get("del:1"); hit {
		t.Error("L1 key should be deleted")
	}
}
// TestL1Cache_Clear verifies Clear drops every entry at once.
func TestL1Cache_Clear(t *testing.T) {
	c := cache.NewL1Cache()
	c.Set("a", 1, time.Minute)
	c.Set("b", 2, time.Minute)
	c.Clear()
	_, hitA := c.Get("a")
	_, hitB := c.Get("b")
	if hitA || hitB {
		t.Error("L1 cache should be empty after Clear()")
	}
}
// TestL1Cache_Size verifies Size tracks inserts and deletes.
func TestL1Cache_Size(t *testing.T) {
	c := cache.NewL1Cache()
	for i, key := range []string{"s1", "s2", "s3"} {
		c.Set(key, i+1, time.Minute)
	}
	if c.Size() != 3 {
		t.Errorf("L1 Size = %d, want 3", c.Size())
	}
	c.Delete("s1")
	if c.Size() != 2 {
		t.Errorf("L1 Size after Delete = %d, want 2", c.Size())
	}
}
// TestL1Cache_Cleanup verifies Cleanup sweeps expired entries while
// keeping live ones.
func TestL1Cache_Cleanup(t *testing.T) {
	c := cache.NewL1Cache()
	c.Set("exp", "v", 30*time.Millisecond)
	c.Set("keep", "v", time.Minute)
	time.Sleep(60 * time.Millisecond)
	c.Cleanup()
	if got := c.Size(); got != 1 {
		t.Errorf("after Cleanup L1 Size = %d, want 1", got)
	}
}
// TestCacheManager_SetGet exercises Set/Get through the manager with only
// an L1 cache configured (nil L2).
func TestCacheManager_SetGet(t *testing.T) {
	ctx := context.Background()
	cm := cache.NewCacheManager(cache.NewL1Cache(), nil)
	if err := cm.Set(ctx, "k1", "v1", time.Minute, time.Minute); err != nil {
		t.Fatalf("CacheManager Set error: %v", err)
	}
	got, hit := cm.Get(ctx, "k1")
	if !hit {
		t.Fatal("CacheManager Get: expected hit")
	}
	if got != "v1" {
		t.Errorf("CacheManager Get value = %v, want v1", got)
	}
}
// TestCacheManager_Delete verifies deleting a key makes later reads miss.
func TestCacheManager_Delete(t *testing.T) {
	ctx := context.Background()
	cm := cache.NewCacheManager(cache.NewL1Cache(), nil)
	_ = cm.Set(ctx, "del:1", "v", time.Minute, time.Minute)
	if err := cm.Delete(ctx, "del:1"); err != nil {
		t.Fatalf("CacheManager Delete error: %v", err)
	}
	if _, hit := cm.Get(ctx, "del:1"); hit {
		t.Error("CacheManager key should be deleted")
	}
}
// TestCacheManager_Exists verifies Exists both before and after a Set.
func TestCacheManager_Exists(t *testing.T) {
	ctx := context.Background()
	cm := cache.NewCacheManager(cache.NewL1Cache(), nil)
	if cm.Exists(ctx, "notexist") {
		t.Error("CacheManager Exists should return false for missing key")
	}
	_ = cm.Set(ctx, "exist:1", "v", time.Minute, time.Minute)
	if !cm.Exists(ctx, "exist:1") {
		t.Error("CacheManager Exists should return true after Set")
	}
}
// TestCacheManager_Clear verifies Clear removes every cached entry.
func TestCacheManager_Clear(t *testing.T) {
	ctx := context.Background()
	cm := cache.NewCacheManager(cache.NewL1Cache(), nil)
	_ = cm.Set(ctx, "a", 1, time.Minute, time.Minute)
	_ = cm.Set(ctx, "b", 2, time.Minute, time.Minute)
	if err := cm.Clear(ctx); err != nil {
		t.Fatalf("CacheManager Clear error: %v", err)
	}
	if cm.Exists(ctx, "a") || cm.Exists(ctx, "b") {
		t.Error("CacheManager should be empty after Clear()")
	}
}
// TestCacheManager_Concurrent hammers Get from many goroutines to surface
// data races (run with -race) and confirms concurrent reads still hit.
func TestCacheManager_Concurrent(t *testing.T) {
	ctx := context.Background()
	cm := cache.NewCacheManager(cache.NewL1Cache(), nil)
	// Warm the cache so readers have something to hit.
	_ = cm.Set(ctx, "concurrent:key", "v", time.Minute, time.Minute)

	var (
		wg   sync.WaitGroup
		hits int64
	)
	for g := 0; g < 50; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 20; i++ {
				if _, ok := cm.Get(ctx, "concurrent:key"); ok {
					atomic.AddInt64(&hits, 1)
				}
			}
		}()
	}
	wg.Wait()
	if hits == 0 {
		t.Error("concurrent cache reads should produce hits")
	}
}
// TestCacheManager_WithDisabledL2 pairs the manager with a disabled Redis
// L2 and checks that the L1 path still works end to end.
func TestCacheManager_WithDisabledL2(t *testing.T) {
	ctx := context.Background()
	disabled := cache.NewRedisCache(false)
	cm := cache.NewCacheManager(cache.NewL1Cache(), disabled)
	if err := cm.Set(ctx, "k", "v", time.Minute, time.Minute); err != nil {
		t.Fatalf("Set with disabled L2 should not error: %v", err)
	}
	got, hit := cm.Get(ctx, "k")
	if !hit || got != "v" {
		t.Errorf("Get from L1 after Set = (%v, %v), want (v, true)", got, hit)
	}
}

171
internal/cache/l1.go vendored Normal file
View File

@@ -0,0 +1,171 @@
package cache
import (
"sync"
"time"
)
const (
// maxItems 是L1Cache的最大条目数
// 超过此限制后将淘汰最久未使用的条目
maxItems = 10000
)
// CacheItem 缓存项
type CacheItem struct {
Value interface{}
Expiration int64
}
// Expired 判断缓存项是否过期
func (item *CacheItem) Expired() bool {
return item.Expiration > 0 && time.Now().UnixNano() > item.Expiration
}
// L1Cache L1本地缓存支持LRU淘汰策略
type L1Cache struct {
items map[string]*CacheItem
mu sync.RWMutex
// accessOrder 记录key的访问顺序用于LRU淘汰
// 第一个是最久未使用的,最后一个是最近使用的
accessOrder []string
}
// NewL1Cache 创建L1缓存
func NewL1Cache() *L1Cache {
return &L1Cache{
items: make(map[string]*CacheItem),
}
}
// Set 设置缓存
func (c *L1Cache) Set(key string, value interface{}, ttl time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
var expiration int64
if ttl > 0 {
expiration = time.Now().Add(ttl).UnixNano()
}
// 如果key已存在更新访问顺序
if _, exists := c.items[key]; exists {
c.items[key] = &CacheItem{
Value: value,
Expiration: expiration,
}
c.updateAccessOrder(key)
return
}
// 检查是否超过最大容量进行LRU淘汰
if len(c.items) >= maxItems {
c.evictLRU()
}
c.items[key] = &CacheItem{
Value: value,
Expiration: expiration,
}
c.accessOrder = append(c.accessOrder, key)
}
// evictLRU 淘汰最久未使用的条目
func (c *L1Cache) evictLRU() {
if len(c.accessOrder) == 0 {
return
}
// 淘汰最久未使用的(第一个)
oldest := c.accessOrder[0]
delete(c.items, oldest)
c.accessOrder = c.accessOrder[1:]
}
// removeFromAccessOrder 从访问顺序中移除key
func (c *L1Cache) removeFromAccessOrder(key string) {
for i, k := range c.accessOrder {
if k == key {
c.accessOrder = append(c.accessOrder[:i], c.accessOrder[i+1:]...)
return
}
}
}
// updateAccessOrder 更新访问顺序将key移到最后最近使用
func (c *L1Cache) updateAccessOrder(key string) {
for i, k := range c.accessOrder {
if k == key {
// 移除当前位置
c.accessOrder = append(c.accessOrder[:i], c.accessOrder[i+1:]...)
// 添加到末尾
c.accessOrder = append(c.accessOrder, key)
return
}
}
}
// Get 获取缓存
func (c *L1Cache) Get(key string) (interface{}, bool) {
c.mu.Lock()
defer c.mu.Unlock()
item, ok := c.items[key]
if !ok {
return nil, false
}
if item.Expired() {
delete(c.items, key)
c.removeFromAccessOrder(key)
return nil, false
}
// 更新访问顺序
c.updateAccessOrder(key)
return item.Value, true
}
// Delete 删除缓存
func (c *L1Cache) Delete(key string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.items, key)
c.removeFromAccessOrder(key)
}
// Clear 清空缓存
func (c *L1Cache) Clear() {
c.mu.Lock()
defer c.mu.Unlock()
c.items = make(map[string]*CacheItem)
c.accessOrder = make([]string, 0)
}
// Size 获取缓存大小
func (c *L1Cache) Size() int {
c.mu.RLock()
defer c.mu.RUnlock()
return len(c.items)
}
// Cleanup 清理过期缓存
func (c *L1Cache) Cleanup() {
c.mu.Lock()
defer c.mu.Unlock()
now := time.Now().UnixNano()
keysToDelete := make([]string, 0)
for key, item := range c.items {
if item.Expiration > 0 && now > item.Expiration {
keysToDelete = append(keysToDelete, key)
}
}
for _, key := range keysToDelete {
delete(c.items, key)
c.removeFromAccessOrder(key)
}
}

165
internal/cache/l2.go vendored Normal file
View File

@@ -0,0 +1,165 @@
package cache
import (
"context"
"encoding/json"
"errors"
"strings"
"time"
redis "github.com/redis/go-redis/v9"
)
// L2Cache is the contract for a distributed (level-2) cache backend.
// The Redis implementation in this package reports a missing key from Get
// as (nil, nil), so callers should not rely on an error to detect absence.
type L2Cache interface {
	// Set stores value under key with the given TTL.
	Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error
	// Get returns the value stored under key, or nil when absent.
	Get(ctx context.Context, key string) (interface{}, error)
	// Delete removes key from the backend.
	Delete(ctx context.Context, key string) error
	// Exists reports whether key is currently stored.
	Exists(ctx context.Context, key string) (bool, error)
	// Clear removes every entry from the backend.
	Clear(ctx context.Context) error
	// Close releases any resources held by the implementation.
	Close() error
}
// RedisCacheConfig configures the Redis-backed L2 cache.
type RedisCacheConfig struct {
	Enabled  bool   // when false, no client is created and all operations are no-ops
	Addr     string // host:port; defaults to "localhost:6379" when empty
	Password string // optional password for the Redis connection
	DB       int    // Redis logical database number
	PoolSize int    // connection pool size; 0 keeps the client's default
}
// RedisCache implements L2Cache using Redis. A disabled instance (or one
// whose client is nil) treats every operation as a harmless no-op.
type RedisCache struct {
	enabled bool
	client  *redis.Client // nil when the cache is disabled
}
// NewRedisCache keeps the old test-friendly constructor: it builds a cache
// from just an enabled flag, relying on default connection settings.
func NewRedisCache(enabled bool) *RedisCache {
	cfg := RedisCacheConfig{Enabled: enabled}
	return NewRedisCacheWithConfig(cfg)
}
// NewRedisCacheWithConfig creates a Redis-backed L2 cache. When the config
// is disabled, no client is created and the returned cache is a no-op.
// An empty Addr falls back to "localhost:6379".
func NewRedisCacheWithConfig(cfg RedisCacheConfig) *RedisCache {
	rc := &RedisCache{enabled: cfg.Enabled}
	if !cfg.Enabled {
		return rc
	}
	opts := &redis.Options{
		Addr:     cfg.Addr,
		Password: cfg.Password,
		DB:       cfg.DB,
	}
	if opts.Addr == "" {
		opts.Addr = "localhost:6379"
	}
	if cfg.PoolSize > 0 {
		opts.PoolSize = cfg.PoolSize
	}
	rc.client = redis.NewClient(opts)
	return rc
}
// Set JSON-encodes value and stores it under key with the given TTL.
// A disabled cache accepts the call and does nothing.
func (c *RedisCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
	if c.client == nil || !c.enabled {
		return nil
	}
	payload, err := json.Marshal(value)
	if err != nil {
		return err
	}
	return c.client.Set(ctx, key, payload, ttl).Err()
}
// Get fetches and decodes the value stored under key. A missing key — and
// a disabled cache — yields (nil, nil) rather than an error.
func (c *RedisCache) Get(ctx context.Context, key string) (interface{}, error) {
	if c.client == nil || !c.enabled {
		return nil, nil
	}
	raw, err := c.client.Get(ctx, key).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return decodeRedisValue(raw)
}
// Delete removes key from Redis; a disabled cache does nothing.
func (c *RedisCache) Delete(ctx context.Context, key string) error {
	if !c.enabled || c.client == nil {
		return nil
	}
	return c.client.Del(ctx, key).Err()
}

// Exists reports whether key is currently stored in Redis.
func (c *RedisCache) Exists(ctx context.Context, key string) (bool, error) {
	if !c.enabled || c.client == nil {
		return false, nil
	}
	n, err := c.client.Exists(ctx, key).Result()
	if err != nil {
		return false, err
	}
	return n > 0, nil
}

// Clear flushes the logical database backing this cache.
func (c *RedisCache) Clear(ctx context.Context) error {
	if !c.enabled || c.client == nil {
		return nil
	}
	return c.client.FlushDB(ctx).Err()
}

// Close shuts down the underlying Redis client, if one was created.
func (c *RedisCache) Close() error {
	if !c.enabled || c.client == nil {
		return nil
	}
	return c.client.Close()
}
// decodeRedisValue turns the raw string stored in Redis back into a Go
// value. Payloads written by Set are JSON, so the happy path is a JSON
// decode (UseNumber keeps integers exact) followed by number
// normalization. Anything that is not a single, complete JSON document —
// including a valid JSON prefix followed by trailing data, e.g. "1 2" —
// is returned as the raw string, so payloads written by foreign clients
// are never half-parsed into a wrong value.
func decodeRedisValue(raw string) (interface{}, error) {
	decoder := json.NewDecoder(strings.NewReader(raw))
	decoder.UseNumber()
	var value interface{}
	if err := decoder.Decode(&value); err != nil {
		// Not JSON at all: hand back the raw payload unchanged.
		return raw, nil
	}
	if decoder.More() {
		// Trailing data after the first JSON value: treat the whole
		// payload as an opaque string instead of a partial parse.
		return raw, nil
	}
	return normalizeRedisValue(value), nil
}

// normalizeRedisValue recursively rewrites json.Number values into int64
// (preferred), float64, or their string form when neither fits, leaving
// every other type untouched. Slices and maps are normalized in place.
func normalizeRedisValue(value interface{}) interface{} {
	switch v := value.(type) {
	case json.Number:
		if n, err := v.Int64(); err == nil {
			return n
		}
		if f, err := v.Float64(); err == nil {
			return f
		}
		return v.String()
	case []interface{}:
		for i := range v {
			v[i] = normalizeRedisValue(v[i])
		}
		return v
	case map[string]interface{}:
		for key, item := range v {
			v[key] = normalizeRedisValue(item)
		}
		return v
	default:
		return v
	}
}

View File

@@ -0,0 +1,98 @@
package cache_test
import (
"context"
"testing"
"time"
"github.com/alicebob/miniredis/v2"
"github.com/user-management-system/internal/cache"
)
// TestRedisCache_EnabledRoundTrip runs Set/Get/Exists/Delete against an
// in-process miniredis server and checks the full round trip.
func TestRedisCache_EnabledRoundTrip(t *testing.T) {
	srv := miniredis.RunT(t)
	l2 := cache.NewRedisCacheWithConfig(cache.RedisCacheConfig{
		Enabled: true,
		Addr:    srv.Addr(),
	})
	t.Cleanup(func() { _ = l2.Close() })

	ctx := context.Background()
	const key = "login_attempt:user:7"
	if err := l2.Set(ctx, key, 3, time.Minute); err != nil {
		t.Fatalf("set redis value failed: %v", err)
	}
	got, err := l2.Get(ctx, key)
	if err != nil {
		t.Fatalf("get redis value failed: %v", err)
	}
	if n, ok := got.(int64); !ok || n != 3 {
		t.Fatalf("expected int64(3), got (%T) %v", got, got)
	}
	found, err := l2.Exists(ctx, key)
	if err != nil {
		t.Fatalf("exists failed: %v", err)
	}
	if !found {
		t.Fatal("expected redis key to exist")
	}
	if err := l2.Delete(ctx, key); err != nil {
		t.Fatalf("delete failed: %v", err)
	}
	found, err = l2.Exists(ctx, key)
	if err != nil {
		t.Fatalf("exists after delete failed: %v", err)
	}
	if found {
		t.Fatal("expected redis key to be deleted")
	}
}
// TestCacheManager_ReadsThroughRedisL2 seeds redis directly, then checks
// that the manager reads through L2 on an L1 miss and rehydrates L1 so the
// value survives deletion of the redis copy.
func TestCacheManager_ReadsThroughRedisL2(t *testing.T) {
	srv := miniredis.RunT(t)
	l2 := cache.NewRedisCacheWithConfig(cache.RedisCacheConfig{
		Enabled: true,
		Addr:    srv.Addr(),
	})
	t.Cleanup(func() { _ = l2.Close() })

	ctx := context.Background()
	const key = "email_daily:user@example.com:2026-03-18"
	if err := l2.Set(ctx, key, 4, time.Minute); err != nil {
		t.Fatalf("seed redis value failed: %v", err)
	}

	manager := cache.NewCacheManager(cache.NewL1Cache(), l2)
	got, hit := manager.Get(ctx, key)
	if !hit {
		t.Fatal("expected cache manager to read from redis l2")
	}
	if n, ok := got.(int64); !ok || n != 4 {
		t.Fatalf("expected int64(4), got (%T) %v", got, got)
	}

	// Drop the redis copy; the value must now be served from L1.
	if err := l2.Delete(ctx, key); err != nil {
		t.Fatalf("delete redis seed failed: %v", err)
	}
	got, hit = manager.Get(ctx, key)
	if !hit {
		t.Fatal("expected cache manager to rehydrate l1 after redis read")
	}
	if n, ok := got.(int64); !ok || n != 4 {
		t.Fatalf("expected l1 to retain int64(4), got (%T) %v", got, got)
	}
}