Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-08-12 00:40:25 -04:00)
added and used LevelHandler

commit 0e4d220c92 (parent e40a99ca3a)
7 changed files with 56 additions and 24 deletions
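The pattern is the same at every call site in the diff below: the zap-era WithIncreasedLevel/Named chain, left behind with TODO comments during the slog migration, is replaced by wrapping the service logger's handler in the new LevelHandler at slog.LevelWarn and installing the result as the gRPC logger. A minimal sketch of that pattern, not part of the commit: it assumes it lives inside the constellation module (internal/logger is not importable from outside), and the JSON handler merely stands in for whatever handler the service actually uses.

package main // illustrative only; must live inside the constellation module

import (
	"log/slog"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/logger"
)

func main() {
	log := slog.New(slog.NewJSONHandler(os.Stderr, nil)) // stand-in for the service logger

	// Wrap the existing handler in the new LevelHandler so the logger handed to
	// gRPC only emits Warn and above, then install it as the global gRPC logger.
	grpcLog := slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC")
	logger.ReplaceGRPCLogger(grpcLog)
}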
@@ -50,8 +50,7 @@ func main() {
 	if *gRPCDebug {
 		logger.ReplaceGRPCLogger(log.WithGroup("gRPC"))
 	} else {
-		//TODO(miampf): Find a good way to dynamically increase slog logLevel
-		logger.ReplaceGRPCLogger(log.WithGroup("gRPC")).WithIncreasedLevel(slog.LevelWarn)
+		logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC"))
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())

@@ -23,7 +23,6 @@ import (
 	pb "github.com/edgelesssys/constellation/v2/debugd/service"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
-	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/keepalive"
 )

@@ -157,8 +156,7 @@ func Start(log *slog.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) {
 	defer wg.Done()
 
 	grpcLog := log.WithGroup("gRPC")
-	// TODO(miampf): Find a way to dynamically increase the log level
-	grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
+	logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, grpcLog.Handler())))
 
 	grpcServer := grpc.NewServer(
 		logger.GetServerStreamInterceptor(grpcLog),

@@ -123,6 +123,52 @@ func middlewareLogger(l *slog.Logger) logging.Logger {
 	})
 }
 
+// LevelHandler copied from the official LevelHandler example in the slog package documentation.
+
+// LevelHandler wraps a Handler with an Enabled method
+// that returns false for levels below a minimum.
+type LevelHandler struct {
+	level   slog.Leveler
+	handler slog.Handler
+}
+
+// NewLevelHandler returns a LevelHandler with the given level.
+// All methods except Enabled delegate to h.
+func NewLevelHandler(level slog.Leveler, h slog.Handler) *LevelHandler {
+	// Optimization: avoid chains of LevelHandlers.
+	if lh, ok := h.(*LevelHandler); ok {
+		h = lh.Handler()
+	}
+	return &LevelHandler{level, h}
+}
+
+// Enabled implements Handler.Enabled by reporting whether
+// level is at least as large as h's level.
+func (h *LevelHandler) Enabled(_ context.Context, level slog.Level) bool {
+	return level >= h.level.Level()
+}
+
+// Handle implements Handler.Handle.
+func (h *LevelHandler) Handle(ctx context.Context, r slog.Record) error {
+	return h.handler.Handle(ctx, r)
+}
+
+// WithAttrs implements Handler.WithAttrs.
+func (h *LevelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	return NewLevelHandler(h.level, h.handler.WithAttrs(attrs))
+}
+
+// WithGroup implements Handler.WithGroup.
+func (h *LevelHandler) WithGroup(name string) slog.Handler {
+	return NewLevelHandler(h.level, h.handler.WithGroup(name))
+}
+
+// Handler returns the Handler wrapped by h.
+func (h *LevelHandler) Handler() slog.Handler {
+	return h.handler
+}
+
 // TestWriter is a writer to a testing.T used in tests for logging with slog.
 type TestWriter struct {
 	T *testing.T
 }

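A minimal sketch of how the new handler behaves (illustrative, not part of the commit; it assumes it sits in the same logger package, and the function name exampleLevelHandlerUsage is made up): wrapping an otherwise verbose handler in NewLevelHandler(slog.LevelWarn, ...) makes Enabled reject anything below Warn, so Info records are dropped before they reach the inner handler, which is what the gRPC call sites in this commit rely on.

package logger

import (
	"log/slog"
	"os"
)

// exampleLevelHandlerUsage raises the effective minimum level of an existing handler.
func exampleLevelHandlerUsage() {
	// The inner handler would normally log everything down to Debug.
	inner := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})

	// Wrap it so only Warn and above pass through.
	log := slog.New(NewLevelHandler(slog.LevelWarn, inner))

	log.Info("dropped: below the LevelHandler minimum")    // filtered out by Enabled
	log.Warn("kept: at or above the LevelHandler minimum") // delegated to inner
}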
@@ -21,7 +21,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
-	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"

@@ -59,8 +58,7 @@ func New(
 
 // Run starts the gRPC server on the given port, using the provided tlsConfig.
 func (s *Server) Run(creds credentials.TransportCredentials, port string) error {
-	// TODO(miampf): Find a good way to increase slogs log level
-	s.log.WithIncreasedLevel(zap.WarnLevel).Named("gRPC").ReplaceGRPCLogger()
+	logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC"))
 	grpcServer := grpc.NewServer(
 		grpc.Creds(creds),
 		logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")),

@@ -72,7 +70,7 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error
 	if err != nil {
 		return fmt.Errorf("failed to listen: %s", err)
 	}
-	s.log.Info("Starting join service on %s", lis.Addr().String())
+	s.log.Info(fmt.Sprintf("Starting join service on %s", lis.Addr().String()))
 	return grpcServer.Serve(lis)
 }
 

@@ -115,7 +113,7 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
 		return nil, status.Errorf(codes.Internal, "getting components ConfigMap name: %s", err)
 	}
 
-	log.Info("Querying %s ConfigMap for components", componentsConfigMapName)
+	log.Info(fmt.Sprintf("Querying %s ConfigMap for components", componentsConfigMapName))
 	components, err := s.kubeClient.GetComponents(ctx, componentsConfigMapName)
 	if err != nil {
 		log.With(slog.Any("error", err)).Error("Failed getting components from ConfigMap")

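Several hunks above and below change plain Info calls rather than the gRPC logger. slog's Logger.Info does not interpret printf verbs: the first argument is the literal message and the remaining arguments are key/value attribute pairs, so a dangling value is rendered under the key !BADKEY. The commit therefore pre-renders the message with fmt.Sprintf. A small self-contained illustration (the address value is made up, not from the diff):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stdout, nil))
	addr := "10.0.0.1:9090" // hypothetical address, for illustration only

	// printf-style call carried over from the zap logger: slog prints "%s" literally
	// and renders addr as a dangling attribute under the key !BADKEY.
	log.Info("Starting join service on %s", addr)

	// What the commit does instead: pre-render the message with fmt.Sprintf.
	log.Info(fmt.Sprintf("Starting join service on %s", addr))

	// Idiomatic slog alternative: pass the address as a structured attribute.
	log.Info("Starting join service", "address", addr)
}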
@@ -18,7 +18,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/kms/kms"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto"
-	"go.uber.org/zap/zapcore"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"

@@ -51,11 +50,7 @@ func (s *Server) Run(port string) error {
 
 	server := grpc.NewServer(logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")))
 	keyserviceproto.RegisterAPIServer(server, s)
-	// TODO(miampf): Find out a good way to pass an increased Level to slog.
-	// A reference implementation for something like that exists
-	// [here](https://pkg.go.dev/log/slog#Handler), however, this would
-	// utilise structs in the logger package again which is not optimal.
-	logger.ReplaceGRPCLogger(s.log.WithGroup("gRPC").WithIncreasedLevel(zapcore.WarnLevel))
+	logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC"))
 
 	// start the server
 	s.log.Info("Starting Constellation key management service on %s", listener.Addr().String())

@@ -16,7 +16,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/upgrade-agent/internal/server"
 	"github.com/spf13/afero"
-	"go.uber.org/zap"
 )
 
 const (

@@ -33,8 +32,7 @@ func main() {
 	if *gRPCDebug {
 		logger.ReplaceGRPCLogger(log.WithGroup("gRPC"))
 	} else {
-		// TODO(miampf): Find a good way to change log level dynamically
-		log.WithGroup("gRPC").WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
+		logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC"))
 	}
 
 	handler := file.NewHandler(afero.NewOsFs())

@@ -21,7 +21,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/verify/verifyproto"
-	"go.uber.org/zap/zapcore"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/keepalive"

@@ -57,8 +56,7 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error {
 	var wg sync.WaitGroup
 	var once sync.Once
 
-	//TODO(miampf): Find a good way to dynamically increase/change log level
-	s.log.WithIncreasedLevel(zapcore.WarnLevel).Named("grpc").ReplaceGRPCLogger()
+	logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler()).WithGroup("grpc")))
 	grpcServer := grpc.NewServer(
 		logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")),
 		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}),

@@ -74,7 +72,7 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error {
 		defer wg.Done()
 		defer grpcServer.GracefulStop()
 
-		s.log.Info("Starting HTTP server on %s", httpListener.Addr().String())
+		s.log.Info(fmt.Sprintf("Starting HTTP server on %s", httpListener.Addr().String()))
 		httpErr := httpServer.Serve(httpListener)
 		if httpErr != nil && httpErr != http.ErrServerClosed {
 			once.Do(func() { err = httpErr })

@@ -86,7 +84,7 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error {
 		defer wg.Done()
 		defer func() { _ = httpServer.Shutdown(context.Background()) }()
 
-		s.log.Info("Starting gRPC server on %s", grpcListener.Addr().String())
+		s.log.Info(fmt.Sprintf("Starting gRPC server on %s", grpcListener.Addr().String()))
 		grpcErr := grpcServer.Serve(grpcListener)
 		if grpcErr != nil {
 			once.Do(func() { err = grpcErr })