build(deps): bump github.com/olekukonko/tablewriter from 1.0.8 to 1.0.9

Bumps [github.com/olekukonko/tablewriter](https://github.com/olekukonko/tablewriter) from 1.0.8 to 1.0.9.
- [Commits](https://github.com/olekukonko/tablewriter/compare/v1.0.8...v1.0.9)

---
updated-dependencies:
- dependency-name: github.com/olekukonko/tablewriter
  dependency-version: 1.0.9
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by: dependabot[bot] (via GitHub)
Date: 2025-08-26 06:40:11 +00:00
Parent: db583c4644
Commit: 5e6fc50e5e
23 changed files with 2385 additions and 383 deletions

go.mod (6 changes)
View File

@@ -58,7 +58,7 @@ require (
github.com/nats-io/nats-server/v2 v2.11.7
github.com/nats-io/nats.go v1.44.0
github.com/oklog/run v1.2.0
github.com/olekukonko/tablewriter v1.0.8
github.com/olekukonko/tablewriter v1.0.9
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.25.1
github.com/onsi/gomega v1.38.0
@@ -265,8 +265,8 @@ require (
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6 // indirect
github.com/olekukonko/ll v0.0.8 // indirect
github.com/olekukonko/errors v1.1.0 // indirect
github.com/olekukonko/ll v0.0.9 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect
github.com/pablodz/inotifywaitgo v0.0.9 // indirect

go.sum (12 changes)
View File

@@ -841,13 +841,13 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+
github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6 h1:r3FaAI0NZK3hSmtTDrBVREhKULp8oUeqLT5Eyl2mSPo=
github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
github.com/olekukonko/ll v0.0.8 h1:sbGZ1Fx4QxJXEqL/6IG8GEFnYojUSQ45dJVwN2FH2fc=
github.com/olekukonko/ll v0.0.8/go.mod h1:En+sEW0JNETl26+K8eZ6/W4UQ7CYSrrgg/EdIYT2H8g=
github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM=
github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
github.com/olekukonko/ll v0.0.9 h1:Y+1YqDfVkqMWuEQMclsF9HUR5+a82+dxJuL1HHSRpxI=
github.com/olekukonko/ll v0.0.9/go.mod h1:En+sEW0JNETl26+K8eZ6/W4UQ7CYSrrgg/EdIYT2H8g=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/olekukonko/tablewriter v1.0.8 h1:f6wJzHg4QUtJdvrVPKco4QTrAylgaU0+b9br/lJxEiQ=
github.com/olekukonko/tablewriter v1.0.8/go.mod h1:H428M+HzoUXC6JU2Abj9IT9ooRmdq9CxuDmKMtrOCMs=
github.com/olekukonko/tablewriter v1.0.9 h1:XGwRsYLC2bY7bNd93Dk51bcPZksWZmLYuaTHR0FqfL8=
github.com/olekukonko/tablewriter v1.0.9/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=

View File

@@ -48,6 +48,11 @@ go get github.com/olekukonko/errors@latest
---
> [!NOTE]
> ✓ added support for `errors.Errorf("user %w not found", errors.New("bob"))`
> ✓ added support for `sequential chain` execution
## Using the `errors` Package
### Basic Error Creation
@@ -74,6 +79,7 @@ func main() {
#### Formatted Error
```go
// main.go
package main
import (
@@ -83,12 +89,21 @@ import (
func main() {
// Formatted error without stack trace
err := errors.Newf("user %s not found", "bob")
fmt.Println(err) // Output: "user bob not found"
errNoWrap := errors.Newf("user %s not found", "bob")
fmt.Println(errNoWrap) // Output: "user bob not found"
// Standard formatted error, no fmt.Errorf needed
stdErr := errors.Stdf("user %s not found", "bob")
fmt.Println(stdErr) // Output: "user bob not found"
// Standard formatted error, no fmt.Errorf needed (using this package)
stdErrNoWrap := errors.Stdf("user %s not found", "bob")
fmt.Println(stdErrNoWrap) // Output: "user bob not found"
// Added support for %w (compatible with fmt.Errorf output)
// errors.Errorf is alias of errors.Newf
errWrap := errors.Errorf("user %w not found", errors.New("bob"))
fmt.Println(errWrap) // Output: "user bob not found"
// Standard formatted error for comparison
stdErrWrap := fmt.Errorf("user %w not found", fmt.Errorf("bob"))
fmt.Println(stdErrWrap) // Output: "user bob not found"
}
```
@@ -243,7 +258,7 @@ func main() {
enhanced := errors.WithStack(err)
fmt.Println("Error with stack:")
fmt.Println("Message:", enhanced.Error()) // Output: "basic error"
fmt.Println("Stack:", enhanced.Stack()) // Output: e.g., ["main.go:15", ...]
fmt.Println("Stack:", enhanced.Stack()) // Output: e.g., "main.go:15"
}
```
@@ -996,6 +1011,198 @@ func contains(substr string) func(error) bool {
return strings.Contains(err.Error(), substr)
}
}
```
### Chain Execution
#### Sequential Task Processing
```go
package main
import (
"fmt"
"github.com/olekukonko/errors"
"time"
)
// validateOrder checks order input.
func validateOrder() error {
return nil // Simulate successful validation
}
// processKYC performs the know-your-customer (KYC) check.
func processKYC() error {
return nil // Simulate successful KYC check
}
// processPayment handles payment processing.
func processPayment() error {
return errors.New("payment declined") // Simulate payment failure
}
// generateInvoice creates an invoice.
func generateInvoice() error {
return errors.New("invoicing unavailable") // Simulate invoicing issue
}
// sendNotification sends a confirmation.
func sendNotification() error {
return errors.New("notification failed") // Simulate notification failure
}
// processOrder simulates a multi-step order processing workflow.
func processOrder() error {
c := errors.NewChain()
// Validate order input
c.Step(validateOrder).Tag("validation")
// KYC process
c.Step(processKYC).Tag("kyc")
// Process payment with retries
c.Step(processPayment).Tag("billing").Retry(3, 100*time.Millisecond)
// Generate invoice
c.Step(generateInvoice).Tag("invoicing")
// Send notification (optional)
c.Step(sendNotification).Tag("notification").Optional()
return c.Run()
}
func main() {
if err := processOrder(); err != nil {
// Inspect the failure in detail and stop
errors.Inspect(err)
return
}
fmt.Println("Order processed successfully")
}
```
#### Sequential Task Processing 2
```go
package main
import (
"fmt"
"os"
"github.com/olekukonko/errors"
)
// validate simulates a validation check that fails.
func validate(name string) error {
return errors.Newf("validation for %s failed", name)
}
// validateOrder checks order input.
func validateOrder() error {
return nil // Simulate successful validation
}
// verifyKYC handles Know Your Customer verification.
func verifyKYC(name string) error {
return validate(name) // Simulate KYC validation failure
}
// processPayment handles payment processing.
func processPayment() error {
return nil // Simulate successful payment
}
// processOrder coordinates the order processing workflow.
func processOrder() error {
chain := errors.NewChain().
Step(validateOrder). // Step 1: Validate order
Call(verifyKYC, "john"). // Step 2: Verify customer
Step(processPayment) // Step 3: Process payment
if err := chain.Run(); err != nil {
return errors.Errorf("processing order: %w", err)
}
return nil
}
func main() {
if err := processOrder(); err != nil {
// Print the full error chain to stderr
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
// Output
// ERROR: processing order: validation for john failed
// For debugging, you could print the stack trace:
// errors.Inspect(err)
os.Exit(1)
}
fmt.Println("order processed successfully")
}
```
#### Retry with Timeout
```go
package main
import (
"context"
"fmt"
"github.com/olekukonko/errors"
"time"
)
func main() {
c := errors.NewChain(
errors.ChainWithTimeout(1*time.Second),
).
Step(func() error {
time.Sleep(2 * time.Second)
return errors.New("fetch failed")
}).
Tag("api").
Retry(3, 200*time.Millisecond)
err := c.Run()
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
fmt.Println("Fetch timed out")
} else {
fmt.Printf("Fetch failed: %v\n", err)
}
return
}
fmt.Println("Fetch succeeded")
}
```
#### Collecting All Errors
```go
package main
import (
"fmt"
"github.com/olekukonko/errors"
)
func main() {
c := errors.NewChain(
errors.ChainWithMaxErrors(2),
).
Step(func() error { return errors.New("task 1 failed") }).Tag("task1").
Step(func() error { return nil }).Tag("task2").
Step(func() error { return errors.New("task 3 failed") }).Tag("task3")
err := c.RunAll()
if err != nil {
errors.Inspect(err)
return
}
fmt.Println("All tasks completed successfully")
}
```
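#### Logging Step Failures
Chains can emit structured logs through any `slog.Handler` supplied via `ChainWithLogHandler`; individual steps opt in with `LogOnFail`. A minimal sketch (the handler choice and the failing step are illustrative assumptions):
```go
package main
import (
	"fmt"
	"log/slog"
	"os"
	"time"
	"github.com/olekukonko/errors"
)
func main() {
	// Route chain errors to a standard slog text handler on stderr.
	handler := slog.NewTextHandler(os.Stderr, nil)
	c := errors.NewChain(errors.ChainWithLogHandler(handler)).
		Step(func() error { return errors.New("upstream unavailable") }).
		Tag("fetch").
		Retry(2, 50*time.Millisecond). // retry attempts are logged too
		LogOnFail()                    // log this step's failure via the handler
	if err := c.Run(); err != nil {
		// The failure has already been logged by the configured handler.
		return
	}
	fmt.Println("fetch succeeded")
}
```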
@@ -1326,7 +1533,7 @@ func main() {
## FAQ
- **When to use `Copy()`?**
- Use `Copy()` to create a modifiable duplicate of an error without altering the original.
- Use `Copy()` to create a modifiable duplicate of an error without altering the original.
- **When to use `Free()`?**
- Use it in performance-critical loops; otherwise autofree handles cleanup (Go 1.24+). See the sketch below.
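A minimal sketch of both answers above; it assumes `New` returns a pooled `*Error` that exposes `Copy()` and `Free()` as the FAQ describes (verify against your installed version):
```go
package main
import (
	"github.com/olekukonko/errors"
)
func process(items []string) {
	base := errors.New("processing failed")
	for _, item := range items {
		// Copy() yields a modifiable duplicate; the original stays untouched.
		dup := base.Copy()
		dup.With("item", item) // attach per-item context
		// In hot loops, hand the duplicate back to the pool explicitly.
		dup.Free()
	}
}
func main() {
	process([]string{"a", "b", "c"})
}
```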
@@ -1355,4 +1562,4 @@ func main() {
- Fork, branch, commit, and PR—see [CONTRIBUTING.md](#).
## License
MIT License - See [LICENSE](LICENSE).
MIT License - See [LICENSE](LICENSE).

vendor/github.com/olekukonko/errors/chain.go (new file, generated, vendored; 610 lines added)
View File

@@ -0,0 +1,610 @@
package errors
import (
"context"
"fmt"
"log/slog" // Standard structured logging package
"reflect"
"strings"
"time"
)
// Chain executes functions sequentially with enhanced error handling.
// Logging is optional and configured via a slog.Handler.
type Chain struct {
steps []chainStep // List of steps to execute
errors []error // Accumulated errors during execution
config chainConfig // Chain-wide configuration
lastStep *chainStep // Pointer to the last added step for configuration
logHandler slog.Handler // Optional logging handler (nil means no logging)
cancel context.CancelFunc // Function to cancel the context
}
// chainStep represents a single step in the chain.
type chainStep struct {
execute func() error // Function to execute for this step
optional bool // If true, errors don't stop the chain
config stepConfig // Step-specific configuration
}
// chainConfig holds chain-wide settings.
type chainConfig struct {
timeout time.Duration // Maximum duration for the entire chain
maxErrors int // Maximum number of errors before stopping (-1 for unlimited)
autoWrap bool // Whether to automatically wrap errors with additional context
}
// stepConfig holds configuration for an individual step.
type stepConfig struct {
context map[string]interface{} // Arbitrary key-value pairs for context
category ErrorCategory // Category for error classification
code int // Numeric error code
retry *Retry // Retry policy for the step
logOnFail bool // Whether to log errors automatically
metricsLabel string // Label for metrics (not used in this code)
logAttrs []slog.Attr // Additional attributes for logging
}
// ChainOption defines a function that configures a Chain.
type ChainOption func(*Chain)
// NewChain creates a new Chain with the given options.
// Logging is disabled by default (logHandler is nil).
func NewChain(opts ...ChainOption) *Chain {
c := &Chain{
config: chainConfig{
autoWrap: true, // Enable error wrapping by default
maxErrors: -1, // No limit on errors by default
},
// logHandler is nil, meaning no logging unless explicitly configured
}
// Apply each configuration option
for _, opt := range opts {
opt(c)
}
return c
}
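// Illustrative usage (sketch only; loadConfig and connectDB are hypothetical
// step functions, not part of this package):
//
//	c := NewChain(ChainWithTimeout(2*time.Second), ChainWithMaxErrors(3)).
//		Step(loadConfig).Tag("config").
//		Step(connectDB).Tag("db").Retry(3, 50*time.Millisecond)
//	if err := c.Run(); err != nil {
//		// first non-optional failure, wrapped with step context
//	}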
// ChainWithLogHandler sets a custom slog.Handler for logging.
// If handler is nil, logging is effectively disabled.
func ChainWithLogHandler(handler slog.Handler) ChainOption {
return func(c *Chain) {
c.logHandler = handler
}
}
// ChainWithTimeout sets a timeout for the entire chain.
func ChainWithTimeout(d time.Duration) ChainOption {
return func(c *Chain) {
c.config.timeout = d
}
}
// ChainWithMaxErrors sets the maximum number of errors allowed.
// A value <= 0 means no limit.
func ChainWithMaxErrors(max int) ChainOption {
return func(c *Chain) {
if max <= 0 {
c.config.maxErrors = -1 // No limit
} else {
c.config.maxErrors = max
}
}
}
// ChainWithAutoWrap enables or disables automatic error wrapping.
func ChainWithAutoWrap(auto bool) ChainOption {
return func(c *Chain) {
c.config.autoWrap = auto
}
}
// Step adds a new step to the chain with the provided function.
// The function must return an error or nil.
func (c *Chain) Step(fn func() error) *Chain {
if fn == nil {
// Panic to enforce valid input
panic("Chain.Step: provided function cannot be nil")
}
// Create a new step with default configuration
step := chainStep{execute: fn, config: stepConfig{}}
c.steps = append(c.steps, step)
// Update lastStep to point to the newly added step
c.lastStep = &c.steps[len(c.steps)-1]
return c
}
// Call adds a step by wrapping a function with arguments.
// It uses reflection to validate and invoke the function.
func (c *Chain) Call(fn interface{}, args ...interface{}) *Chain {
// Wrap the function and arguments into an executable step
wrappedFn, err := c.wrapCallable(fn, args...)
if err != nil {
// Panic on setup errors to catch them early
panic(fmt.Sprintf("Chain.Call setup error: %v", err))
}
// Add the wrapped function as a step
step := chainStep{execute: wrappedFn, config: stepConfig{}}
c.steps = append(c.steps, step)
c.lastStep = &c.steps[len(c.steps)-1]
return c
}
// Optional marks the last step as optional.
// Optional steps don't stop the chain on error.
func (c *Chain) Optional() *Chain {
if c.lastStep == nil {
// Panic if no step exists to mark as optional
panic("Chain.Optional: must call Step() or Call() before Optional()")
}
c.lastStep.optional = true
return c
}
// WithLog adds logging attributes to the last step.
func (c *Chain) WithLog(attrs ...slog.Attr) *Chain {
if c.lastStep == nil {
// Panic if no step exists to configure
panic("Chain.WithLog: must call Step() or Call() before WithLog()")
}
// Append attributes to the step's logging configuration
c.lastStep.config.logAttrs = append(c.lastStep.config.logAttrs, attrs...)
return c
}
// Timeout sets a timeout for the entire chain.
func (c *Chain) Timeout(d time.Duration) *Chain {
c.config.timeout = d
return c
}
// MaxErrors sets the maximum number of errors allowed.
func (c *Chain) MaxErrors(max int) *Chain {
if max <= 0 {
c.config.maxErrors = -1 // No limit
} else {
c.config.maxErrors = max
}
return c
}
// With adds a key-value pair to the last step's context.
func (c *Chain) With(key string, value interface{}) *Chain {
if c.lastStep == nil {
// Panic if no step exists to configure
panic("Chain.With: must call Step() or Call() before With()")
}
// Initialize context map if nil
if c.lastStep.config.context == nil {
c.lastStep.config.context = make(map[string]interface{})
}
// Add the key-value pair
c.lastStep.config.context[key] = value
return c
}
// Tag sets an error category for the last step.
func (c *Chain) Tag(category ErrorCategory) *Chain {
if c.lastStep == nil {
// Panic if no step exists to configure
panic("Chain.Tag: must call Step() or Call() before Tag()")
}
c.lastStep.config.category = category
return c
}
// Code sets a numeric error code for the last step.
func (c *Chain) Code(code int) *Chain {
if c.lastStep == nil {
// Panic if no step exists to configure
panic("Chain.Code: must call Step() or Call() before Code()")
}
c.lastStep.config.code = code
return c
}
// Retry configures retry behavior for the last step.
func (c *Chain) Retry(maxAttempts int, delay time.Duration, opts ...RetryOption) *Chain {
if c.lastStep == nil {
panic("Chain.Retry: must call Step() or Call() before Retry()")
}
if maxAttempts < 1 {
maxAttempts = 1
}
// Define default retry options
retryOpts := []RetryOption{
WithMaxAttempts(maxAttempts),
WithDelay(delay),
WithRetryIf(func(err error) bool { return IsRetryable(err) }),
}
// Add logging for retry attempts if a handler is configured
if c.logHandler != nil {
step := c.lastStep
retryOpts = append(retryOpts, WithOnRetry(func(attempt int, err error) {
// Prepare logging attributes
logAttrs := []slog.Attr{
slog.Int("attempt", attempt),
slog.Int("max_attempts", maxAttempts),
}
// Enhance the error with step context
enhancedErr := c.enhanceError(err, step)
// Log the retry attempt
c.logError(enhancedErr, fmt.Sprintf("Retrying step (attempt %d/%d)", attempt, maxAttempts), step.config, logAttrs...)
}))
}
// Append any additional retry options
retryOpts = append(retryOpts, opts...)
// Create and assign the retry configuration
c.lastStep.config.retry = NewRetry(retryOpts...)
return c
}
// LogOnFail enables automatic logging of errors for the last step.
func (c *Chain) LogOnFail() *Chain {
if c.lastStep == nil {
// Panic if no step exists to configure
panic("Chain.LogOnFail: must call Step() or Call() before LogOnFail()")
}
c.lastStep.config.logOnFail = true
return c
}
// Run executes the chain, stopping on the first non-optional error.
// It returns the first error encountered or nil if all steps succeed.
func (c *Chain) Run() error {
// Create a context with timeout or cancellation
ctx, cancel := c.getContextAndCancel()
defer cancel()
c.cancel = cancel
// Clear any previous errors
c.errors = c.errors[:0]
// Execute each step in sequence
for i := range c.steps {
step := &c.steps[i]
// Check if the context has been canceled
select {
case <-ctx.Done():
err := ctx.Err()
// Enhance the error with step context
enhancedErr := c.enhanceError(err, step)
c.errors = append(c.errors, enhancedErr)
// Log the context error
c.logError(enhancedErr, "Chain stopped due to context error before step", step.config)
return enhancedErr
default:
}
// Execute the step
err := c.executeStep(ctx, step)
if err != nil {
// Enhance the error with step context
enhancedErr := c.enhanceError(err, step)
c.errors = append(c.errors, enhancedErr)
// Log the error if required
if step.config.logOnFail || !step.optional {
logMsg := "Chain stopped due to error in step"
if step.optional {
logMsg = "Optional step failed"
}
c.logError(enhancedErr, logMsg, step.config)
}
// Stop execution if the step is not optional
if !step.optional {
return enhancedErr
}
}
}
// Return nil if all steps completed successfully
return nil
}
// RunAll executes all steps, collecting errors without stopping.
// It returns a MultiError containing all errors or nil if none occurred.
func (c *Chain) RunAll() error {
ctx, cancel := c.getContextAndCancel()
defer cancel()
c.cancel = cancel
c.errors = c.errors[:0]
multi := NewMultiError()
for i := range c.steps {
step := &c.steps[i]
select {
case <-ctx.Done():
err := ctx.Err()
enhancedErr := c.enhanceError(err, step)
c.errors = append(c.errors, enhancedErr)
multi.Add(enhancedErr)
c.logError(enhancedErr, "Chain stopped due to context error before step (RunAll)", step.config)
goto endRunAll
default:
}
err := c.executeStep(ctx, step)
if err != nil {
enhancedErr := c.enhanceError(err, step)
c.errors = append(c.errors, enhancedErr)
multi.Add(enhancedErr)
if step.config.logOnFail && c.logHandler != nil {
c.logError(enhancedErr, "Step failed during RunAll", step.config)
}
if c.config.maxErrors > 0 && multi.Count() >= c.config.maxErrors {
if c.logHandler != nil {
// Create a logger to log the max errors condition
logger := slog.New(c.logHandler)
logger.LogAttrs(
context.Background(),
slog.LevelError,
fmt.Sprintf("Stopping RunAll after reaching max errors (%d)", c.config.maxErrors),
slog.Int("max_errors", c.config.maxErrors),
)
}
goto endRunAll
}
}
}
endRunAll:
return multi.Single()
}
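// Illustrative usage (sketch): Run stops at the first non-optional failure,
// while RunAll keeps going and aggregates everything into a MultiError.
//
//	if err := c.RunAll(); err != nil {
//		for _, e := range c.Errors() {
//			fmt.Println(e) // report each collected error
//		}
//	}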
// Errors returns a copy of the collected errors.
func (c *Chain) Errors() []error {
if len(c.errors) == 0 {
return nil
}
// Create a copy to prevent external modification
errs := make([]error, len(c.errors))
copy(errs, c.errors)
return errs
}
// Len returns the number of steps in the chain.
func (c *Chain) Len() int {
return len(c.steps)
}
// HasErrors checks if any errors were collected.
func (c *Chain) HasErrors() bool {
return len(c.errors) > 0
}
// LastError returns the most recent error or nil if none exist.
func (c *Chain) LastError() error {
if len(c.errors) > 0 {
return c.errors[len(c.errors)-1]
}
return nil
}
// Reset clears the chain's steps, errors, and context.
func (c *Chain) Reset() {
if c.cancel != nil {
// Cancel any active context
c.cancel()
c.cancel = nil
}
// Clear steps and errors
c.steps = c.steps[:0]
c.errors = c.errors[:0]
c.lastStep = nil
}
// Unwrap returns the collected errors (alias for Errors).
func (c *Chain) Unwrap() []error {
return c.errors
}
// getContextAndCancel creates a context based on the chain's timeout.
// It returns a context and its cancellation function.
func (c *Chain) getContextAndCancel() (context.Context, context.CancelFunc) {
parentCtx := context.Background()
if c.config.timeout > 0 {
// Create a context with a timeout
return context.WithTimeout(parentCtx, c.config.timeout)
}
// Create a cancellable context
return context.WithCancel(parentCtx)
}
// logError logs an error with step-specific context and attributes.
// It only logs if a handler is configured and the error is non-nil.
func (c *Chain) logError(err error, msg string, config stepConfig, additionalAttrs ...slog.Attr) {
// Skip logging if no handler is set or error is nil
if c == nil || c.logHandler == nil || err == nil {
return
}
// Create a logger on demand using the configured handler
logger := slog.New(c.logHandler)
// Initialize attributes with error and timestamp
allAttrs := make([]slog.Attr, 0, 5+len(config.logAttrs)+len(additionalAttrs))
allAttrs = append(allAttrs, slog.Any("error", err))
allAttrs = append(allAttrs, slog.Time("timestamp", time.Now()))
// Add step-specific metadata
if config.category != "" {
allAttrs = append(allAttrs, slog.String("category", string(config.category)))
}
if config.code != 0 {
allAttrs = append(allAttrs, slog.Int("code", config.code))
}
for k, v := range config.context {
allAttrs = append(allAttrs, slog.Any(k, v))
}
allAttrs = append(allAttrs, config.logAttrs...)
allAttrs = append(allAttrs, additionalAttrs...)
// Add stack trace and error name if the error is of type *Error
if e, ok := err.(*Error); ok {
if stack := e.Stack(); len(stack) > 0 {
// Format stack trace, truncating if too long
stackStr := "\n\t" + strings.Join(stack, "\n\t")
if len(stackStr) > 1000 {
stackStr = stackStr[:1000] + "..."
}
allAttrs = append(allAttrs, slog.String("stacktrace", stackStr))
}
if name := e.Name(); name != "" {
allAttrs = append(allAttrs, slog.String("error_name", name))
}
}
// Log the error at ERROR level with all attributes
// Use a defer to catch any panics during logging
defer func() {
if r := recover(); r != nil {
// Print to stdout to avoid infinite recursion
fmt.Printf("ERROR: Recovered from panic during logging: %v\nAttributes: %v\n", r, allAttrs)
}
}()
logger.LogAttrs(context.Background(), slog.LevelError, msg, allAttrs...)
}
// wrapCallable wraps a function and its arguments into an executable step.
// It uses reflection to validate the function and arguments.
func (c *Chain) wrapCallable(fn interface{}, args ...interface{}) (func() error, error) {
val := reflect.ValueOf(fn)
typ := val.Type()
// Ensure the provided value is a function
if typ.Kind() != reflect.Func {
return nil, fmt.Errorf("provided 'fn' is not a function (got %T)", fn)
}
// Check if the number of arguments matches the function's signature
if typ.NumIn() != len(args) {
return nil, fmt.Errorf("function expects %d arguments, but %d were provided", typ.NumIn(), len(args))
}
// Prepare argument values
argVals := make([]reflect.Value, len(args))
errorType := reflect.TypeOf((*error)(nil)).Elem()
for i, arg := range args {
expectedType := typ.In(i)
var providedVal reflect.Value
if arg != nil {
providedVal = reflect.ValueOf(arg)
// Check if the argument type is assignable to the expected type
if !providedVal.Type().AssignableTo(expectedType) {
// Special case for error interfaces
if expectedType.Kind() == reflect.Interface && expectedType.Implements(errorType) && providedVal.Type().Implements(errorType) {
// Allow error interface
} else {
return nil, fmt.Errorf("argument %d type mismatch: expected %s, got %s", i, expectedType, providedVal.Type())
}
}
} else {
// Handle nil arguments for nullable types
switch expectedType.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
providedVal = reflect.Zero(expectedType)
default:
return nil, fmt.Errorf("argument %d is nil, but expected non-nillable type %s", i, expectedType)
}
}
argVals[i] = providedVal
}
// Validate the function's return type
if typ.NumOut() > 1 || (typ.NumOut() == 1 && !typ.Out(0).Implements(errorType)) {
return nil, fmt.Errorf("function must return either no values or a single error (got %d return values)", typ.NumOut())
}
// Return a wrapped function that calls the original with the provided arguments
return func() error {
results := val.Call(argVals)
if len(results) == 1 && results[0].Interface() != nil {
return results[0].Interface().(error)
}
return nil
}, nil
}
// executeStep runs a single step, applying retries if configured.
func (c *Chain) executeStep(ctx context.Context, step *chainStep) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if step.config.retry != nil {
retry := step.config.retry.Transform(WithContext(ctx))
// Wrap step execution to respect context
wrappedFn := func() error {
type result struct {
err error
}
done := make(chan result, 1)
go func() {
done <- result{err: step.execute()}
}()
select {
case res := <-done:
return res.err
case <-ctx.Done():
return ctx.Err()
}
}
return retry.Execute(wrappedFn)
}
// Non-retry case also respects context
type result struct {
err error
}
done := make(chan result, 1)
go func() {
done <- result{err: step.execute()}
}()
select {
case res := <-done:
return res.err
case <-ctx.Done():
return ctx.Err()
}
}
// enhanceError wraps an error with additional context from the step.
func (c *Chain) enhanceError(err error, step *chainStep) error {
if err == nil || !c.config.autoWrap {
// Return the error unchanged if nil or autoWrap is disabled
return err
}
// Initialize the base error
var baseError *Error
if e, ok := err.(*Error); ok {
// Copy existing *Error to preserve its properties
baseError = e.Copy()
} else {
// Create a new *Error wrapping the original
baseError = New(err.Error()).Wrap(err).WithStack()
}
if step != nil {
// Add step-specific context to the error
if step.config.category != "" && baseError.Category() == "" {
baseError.WithCategory(step.config.category)
}
if step.config.code != 0 && baseError.Code() == 0 {
baseError.WithCode(step.config.code)
}
for k, v := range step.config.context {
baseError.With(k, v)
}
for _, attr := range step.config.logAttrs {
baseError.With(attr.Key, attr.Value.Any())
}
if step.config.retry != nil && !baseError.HasContextKey(ctxRetry) {
// Mark the error as retryable if retries are configured
baseError.WithRetryable()
}
}
return baseError
}

File diff suppressed because it is too large.

View File

@@ -31,7 +31,7 @@ func Code(err error) int {
if e, ok := err.(*Error); ok {
return e.Code()
}
return 500
return DefaultCode
}
// Context extracts the context map from an error, if it is an *Error.
@@ -63,7 +63,12 @@ func Convert(err error) *Error {
}
// Manual unwrapping as fallback
visited := make(map[error]bool)
for unwrapped := err; unwrapped != nil; {
if visited[unwrapped] {
break // Cycle detected
}
visited[unwrapped] = true
if e, ok := unwrapped.(*Error); ok {
return e
}
@@ -229,12 +234,16 @@ func IsRetryable(err error) bool {
}
}
}
// Fallback to context map
// Check regular context
if e.context != nil {
if val, ok := e.context[ctxRetry].(bool); ok {
return val
}
}
// Check cause recursively
if e.cause != nil {
return IsRetryable(e.cause)
}
}
lowerMsg := strings.ToLower(err.Error())
return IsTimeout(err) || strings.Contains(lowerMsg, "retry")

vendor/github.com/olekukonko/errors/inspect.go (new file, generated, vendored; 225 lines added)
View File

@@ -0,0 +1,225 @@
// File: inspect.go
// Updated to support both error and *Error with delegation for cleaner *Error handling
package errors
import (
stderrs "errors"
"fmt"
"strings"
"time"
)
// Inspect provides detailed examination of an error, handling both single errors and MultiError
func Inspect(err error) {
if err == nil {
fmt.Println("No error occurred")
return
}
fmt.Printf("\n=== Error Inspection ===\n")
fmt.Printf("Top-level error: %v\n", err)
fmt.Printf("Top-level error type: %T\n", err)
// Handle *Error directly
if e, ok := err.(*Error); ok {
InspectError(e)
return
}
// Handle MultiError
if multi, ok := err.(*MultiError); ok {
allErrors := multi.Errors()
fmt.Printf("\nContains %d errors:\n", len(allErrors))
for i, e := range allErrors {
fmt.Printf("\n--- Error %d ---\n", i+1)
inspectSingleError(e)
}
} else {
// Inspect single error if not MultiError or *Error
fmt.Println("\n--- Details ---")
inspectSingleError(err)
}
// Additional diagnostics
fmt.Println("\n--- Diagnostics ---")
if IsRetryable(err) {
fmt.Println("- Error chain contains retryable errors")
}
if IsTimeout(err) {
fmt.Println("- Error chain contains timeout errors")
}
if code := getErrorCode(err); code != 0 {
fmt.Printf("- Highest priority error code: %d\n", code)
}
fmt.Printf("========================\n\n")
}
// InspectError provides detailed inspection of a specific *Error instance
func InspectError(err *Error) {
if err == nil {
fmt.Println("No error occurred")
return
}
fmt.Printf("\n=== Error Inspection (*Error) ===\n")
fmt.Printf("Top-level error: %v\n", err)
fmt.Printf("Top-level error type: %T\n", err)
fmt.Println("\n--- Details ---")
inspectSingleError(err) // Delegate to handle unwrapping and details
// Additional diagnostics specific to *Error
fmt.Println("\n--- Diagnostics ---")
if IsRetryable(err) {
fmt.Println("- Error is retryable")
}
if IsTimeout(err) {
fmt.Println("- Error chain contains timeout errors")
}
if code := err.Code(); code != 0 {
fmt.Printf("- Error code: %d\n", code)
}
fmt.Printf("========================\n\n")
}
// inspectSingleError handles inspection of a single error (may be part of a chain)
func inspectSingleError(err error) {
if err == nil {
fmt.Println(" (nil error)")
return
}
fmt.Printf(" Error: %v\n", err)
fmt.Printf(" Type: %T\n", err)
// Handle wrapped errors, including *Error type
var currentErr error = err
depth := 0
for currentErr != nil {
prefix := strings.Repeat(" ", depth+1)
if depth > 0 {
fmt.Printf("%sWrapped Cause (%T): %v\n", prefix, currentErr, currentErr)
}
// Check if it's our specific *Error type
if e, ok := currentErr.(*Error); ok {
if name := e.Name(); name != "" {
fmt.Printf("%sName: %s\n", prefix, name)
}
if cat := e.Category(); cat != "" {
fmt.Printf("%sCategory: %s\n", prefix, cat)
}
if code := e.Code(); code != 0 {
fmt.Printf("%sCode: %d\n", prefix, code)
}
if ctx := e.Context(); len(ctx) > 0 {
fmt.Printf("%sContext:\n", prefix)
for k, v := range ctx {
fmt.Printf("%s %s: %v\n", prefix, k, v)
}
}
if stack := e.Stack(); len(stack) > 0 {
fmt.Printf("%sStack (Top 3):\n", prefix)
limit := 3
if len(stack) < limit {
limit = len(stack)
}
for i := 0; i < limit; i++ {
fmt.Printf("%s %s\n", prefix, stack[i])
}
if len(stack) > limit {
fmt.Printf("%s ... (%d more frames)\n", prefix, len(stack)-limit)
}
}
}
// Unwrap using standard errors.Unwrap and handle *Error Unwrap
var nextErr error
// Prioritize *Error's Unwrap if available AND it returns non-nil
if e, ok := currentErr.(*Error); ok {
unwrapped := e.Unwrap()
if unwrapped != nil {
nextErr = unwrapped
} else {
// If *Error.Unwrap returns nil, fall back to standard unwrap
// This handles cases where *Error might wrap a non-standard error
// or where its internal cause is deliberately nil.
nextErr = stderrs.Unwrap(currentErr)
}
} else {
nextErr = stderrs.Unwrap(currentErr) // Fall back to standard unwrap for non-*Error types
}
// Prevent infinite loops if Unwrap returns the same error, or stop if no more unwrapping
if nextErr == currentErr || nextErr == nil {
break
}
currentErr = nextErr
depth++
if depth > 10 { // Safety break for very deep or potentially cyclic chains
fmt.Printf("%s... (chain too deep or potential cycle)\n", strings.Repeat(" ", depth+1))
break
}
}
}
// getErrorCode traverses the error chain to find the highest priority code.
// It uses errors.As to find the first *Error in the chain.
func getErrorCode(err error) int {
var code int = 0 // Default code
var target *Error
if As(err, &target) { // Use the package's As helper
if target != nil { // Add nil check for safety
code = target.Code()
}
}
// If the top-level error is *Error and has a code, it might take precedence.
// This depends on desired logic. Let's keep it simple for now: first code found by As.
if code == 0 { // Only check top-level if As didn't find one with a code
if e, ok := err.(*Error); ok {
code = e.Code()
}
}
return code
}
// handleError demonstrates using Inspect with additional handling logic
func handleError(err error) {
fmt.Println("\n=== Processing Failure ===")
Inspect(err) // Use the primary Inspect function
// Additional handling based on inspection
code := getErrorCode(err) // Use the helper
switch {
case IsTimeout(err):
fmt.Println("\nAction: Check connectivity or increase timeout")
case code == 402: // Check code obtained via helper
fmt.Println("\nAction: Payment processing failed - notify billing")
default:
fmt.Println("\nAction: Generic failure handling")
}
}
// processOrder demonstrates Chain usage with Inspect
func processOrder() error {
validateInput := func() error { return nil }
processPayment := func() error { return stderrs.New("credit card declined") }
sendNotification := func() error { fmt.Println("Notification sent."); return nil }
logOrder := func() error { fmt.Println("Order logged."); return nil }
chain := NewChain(ChainWithTimeout(2*time.Second)).
Step(validateInput).Tag("validation").
Step(processPayment).Tag("billing").Code(402).Retry(3, 100*time.Millisecond, WithRetryIf(IsRetryable)).
Step(sendNotification).Optional().
Step(logOrder)
err := chain.Run()
if err != nil {
handleError(err) // Call the unified error handler
return err // Propagate the error if needed
}
fmt.Println("Order processed successfully!")
return nil
}

View File

@@ -1,11 +1,13 @@
package errors
import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"sync/atomic"
)
// MultiError represents a thread-safe collection of errors with enhanced features.
@@ -45,40 +47,56 @@ func NewMultiError(opts ...MultiErrorOption) *MultiError {
// Add appends an error to the collection with optional sampling, limit checks, and duplicate prevention.
// Ignores nil errors and duplicates based on string equality; thread-safe.
func (m *MultiError) Add(err error) {
if err == nil {
func (m *MultiError) Add(errs ...error) {
if len(errs) == 0 {
return
}
m.mu.Lock()
defer m.mu.Unlock()
// Check for duplicates by comparing error messages
for _, e := range m.errors {
if e.Error() == err.Error() {
return
for _, err := range errs {
if err == nil {
continue
}
}
// Apply sampling if enabled and collection isn't empty
if m.sampling && len(m.errors) > 0 {
var r uint32
if m.rand != nil {
r = uint32(m.rand.Int31n(100))
} else {
r = fastRand() % 100
// Check for duplicates by comparing error messages
duplicate := false
for _, e := range m.errors {
if e.Error() == err.Error() {
duplicate = true
break
}
}
if r > m.sampleRate { // Accept if random value is within sample rate
return
if duplicate {
continue
}
}
// Respect limit if set
if m.limit > 0 && len(m.errors) >= m.limit {
return
}
// Apply sampling if enabled and collection isn't empty
if m.sampling && len(m.errors) > 0 {
var r uint32
if m.rand != nil {
r = uint32(m.rand.Int31n(100))
} else {
r = fastRand() % 100
}
if r > m.sampleRate { // Accept if random value is within sample rate
continue
}
}
m.errors = append(m.errors, err)
// Respect limit if set
if m.limit > 0 && len(m.errors) >= m.limit {
continue
}
m.errors = append(m.errors, err)
}
}
// Addf formats and adds a new error to the collection.
func (m *MultiError) Addf(format string, args ...interface{}) {
m.Add(Newf(format, args...))
}
// Clear removes all errors from the collection.
@@ -300,6 +318,75 @@ func WithRand(r *rand.Rand) MultiErrorOption {
}
}
// MarshalJSON serializes the MultiError to JSON, including all contained errors and configuration metadata.
// Thread-safe; errors are serialized using their MarshalJSON method if available, otherwise as strings.
func (m *MultiError) MarshalJSON() ([]byte, error) {
m.mu.RLock()
defer m.mu.RUnlock()
// Get buffer from pool for efficiency
buf := jsonBufferPool.Get().(*bytes.Buffer)
defer jsonBufferPool.Put(buf)
buf.Reset()
// Create encoder
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
// Define JSON structure
type jsonError struct {
Error interface{} `json:"error"` // Holds either JSON-marshaled error or string
}
je := struct {
Count int `json:"count"` // Number of errors
Limit int `json:"limit,omitempty"` // Maximum error limit (omitted if 0)
Sampling bool `json:"sampling,omitempty"` // Whether sampling is enabled
SampleRate uint32 `json:"sample_rate,omitempty"` // Sampling rate (1-100, omitted if not sampling)
Errors []jsonError `json:"errors"` // List of errors
}{
Count: len(m.errors),
Limit: m.limit,
Sampling: m.sampling,
SampleRate: m.sampleRate,
}
// Serialize each error
je.Errors = make([]jsonError, len(m.errors))
for i, err := range m.errors {
if err == nil {
je.Errors[i] = jsonError{Error: nil}
continue
}
// Check if the error implements json.Marshaler
if marshaler, ok := err.(json.Marshaler); ok {
marshaled, err := marshaler.MarshalJSON()
if err != nil {
// Fallback to string if marshaling fails
je.Errors[i] = jsonError{Error: err.Error()}
} else {
var raw json.RawMessage = marshaled
je.Errors[i] = jsonError{Error: raw}
}
} else {
// Use error string for non-marshaler errors
je.Errors[i] = jsonError{Error: err.Error()}
}
}
// Encode JSON
if err := enc.Encode(je); err != nil {
return nil, fmt.Errorf("failed to marshal MultiError: %v", err)
}
// Remove trailing newline
result := buf.Bytes()
if len(result) > 0 && result[len(result)-1] == '\n' {
result = result[:len(result)-1]
}
return result, nil
}
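// Illustrative usage (sketch; err1 and err2 are hypothetical errors):
//
//	m := NewMultiError()
//	m.Add(err1, nil, err2)     // nil entries are skipped, duplicates are dropped
//	data, _ := json.Marshal(m) // routes through MarshalJSON above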
// defaultFormat provides the default formatting for multiple errors.
// Returns a semicolon-separated list prefixed with the error count (e.g., "errors(3): err1; err2; err3").
func defaultFormat(errs []error) string {
@@ -316,10 +403,21 @@ func defaultFormat(errs []error) string {
// fastRand generates a quick pseudo-random number for sampling.
// Uses a simple xorshift algorithm based on the current time; not cryptographically secure.
var fastRandState uint32 = 1 // Must be non-zero
func fastRand() uint32 {
r := uint32(time.Now().UnixNano())
r ^= r << 13
r ^= r >> 17
r ^= r << 5
return r
for {
// Atomically load the current state
old := atomic.LoadUint32(&fastRandState)
// Xorshift computation
x := old
x ^= x << 13
x ^= x >> 17
x ^= x << 5
// Attempt to store the new state atomically
if atomic.CompareAndSwapUint32(&fastRandState, old, x) {
return x
}
// Otherwise retry
}
}

View File

@@ -103,42 +103,124 @@ func (r *Retry) Execute(fn func() error) error {
var lastErr error
for attempt := 1; attempt <= r.maxAttempts; attempt++ {
// Check context before each attempt
select {
case <-r.ctx.Done():
return r.ctx.Err()
default:
}
err := fn()
if err == nil {
return nil
}
// Check if retry is applicable; return immediately if not retryable
lastErr = err
// Check if we should retry
if r.retryIf != nil && !r.retryIf(err) {
return err
}
lastErr = err
if r.onRetry != nil {
r.onRetry(attempt, err)
}
// Exit if this was the last attempt
// Don't delay after last attempt
if attempt == r.maxAttempts {
break
}
// Calculate delay with backoff, cap at maxDelay, and apply jitter if enabled
// Calculate delay with backoff
delay := r.backoff.Backoff(attempt, r.delay)
if r.maxDelay > 0 && delay > r.maxDelay {
delay = r.maxDelay
}
if r.jitter {
delay = addJitter(delay)
}
// Wait with context
select {
case <-r.ctx.Done():
return r.ctx.Err()
case <-time.After(delay):
}
}
return lastErr
}
// ExecuteContext runs the provided function with retry logic, respecting context cancellation.
// Returns nil on success or the last error if all attempts fail or context is cancelled.
func (r *Retry) ExecuteContext(ctx context.Context, fn func() error) error {
var lastErr error
// If the retry instance already has a context, use it. Otherwise, use the provided one.
// If both are provided, maybe create a derived context? For now, prioritize the one from WithContext.
execCtx := r.ctx
if execCtx == context.Background() && ctx != nil { // Use provided ctx if retry ctx is default and provided one isn't nil
execCtx = ctx
} else if ctx == nil { // Ensure we always have a non-nil context
execCtx = context.Background()
}
// Note: This logic might need refinement depending on how contexts should interact.
// A safer approach might be: if r.ctx != background, use it. Else use provided ctx.
for attempt := 1; attempt <= r.maxAttempts; attempt++ {
// Check context before executing the function
select {
case <-execCtx.Done():
return execCtx.Err() // Return context error immediately
default:
// Context is okay, proceed
}
err := fn()
if err == nil {
return nil // Success
}
// Check if retry is applicable based on the error
if r.retryIf != nil && !r.retryIf(err) {
return err // Not retryable, return the error
}
lastErr = err // Store the last encountered error
// Execute the OnRetry callback if configured
if r.onRetry != nil {
r.onRetry(attempt, err)
}
// Exit loop if this was the last attempt
if attempt == r.maxAttempts {
break
}
// --- Calculate and apply delay ---
currentDelay := r.backoff.Backoff(attempt, r.delay)
if currentDelay > r.maxDelay {
if r.maxDelay > 0 && currentDelay > r.maxDelay { // Check maxDelay > 0 before capping
currentDelay = r.maxDelay
}
if r.jitter {
currentDelay = addJitter(currentDelay)
}
// Wait with respect to context cancellation or timeout
if currentDelay < 0 { // Ensure delay isn't negative after jitter
currentDelay = 0
}
// --- Wait for the delay or context cancellation ---
select {
case <-r.ctx.Done():
return r.ctx.Err()
case <-execCtx.Done():
// If context is cancelled during the wait, return the context error
// Often more informative than returning the last application error.
return execCtx.Err()
case <-time.After(currentDelay):
// Wait finished, continue to the next attempt
}
}
// All attempts failed, return the last error encountered
return lastErr
}
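// Illustrative usage (sketch; ctx and fetchOnce are hypothetical):
//
//	r := NewRetry(
//		WithMaxAttempts(3),
//		WithDelay(100*time.Millisecond),
//		WithRetryIf(IsRetryable),
//	)
//	err := r.ExecuteContext(ctx, fetchOnce)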

View File

@@ -7,6 +7,7 @@ import (
"os"
"sort"
"strings"
"time"
)
// Palette defines ANSI color codes for various log components.
@@ -76,8 +77,10 @@ var lightPalette = Palette{
// writing the result to the provided writer.
// Thread-safe if the underlying writer is thread-safe.
type ColorizedHandler struct {
w io.Writer // Destination for colored log output
palette Palette // Color scheme for formatting
w io.Writer // Destination for colored log output
palette Palette // Color scheme for formatting
showTime bool // Whether to display timestamps
timeFormat string // Format for timestamps (defaults to time.RFC3339)
}
// ColorOption defines a configuration function for ColorizedHandler.
@@ -104,7 +107,12 @@ func WithColorPallet(pallet Palette) ColorOption {
// logger := ll.New("app").Enable().Handler(handler)
// logger.Info("Test") // Output: [app] <colored INFO>: Test
func NewColorizedHandler(w io.Writer, opts ...ColorOption) *ColorizedHandler {
c := &ColorizedHandler{w: w} // Initialize with writer
// Initialize with writer
c := &ColorizedHandler{w: w,
showTime: false,
timeFormat: time.RFC3339,
}
// Apply configuration options
for _, opt := range opts {
opt(c)
@@ -136,6 +144,19 @@ func (h *ColorizedHandler) Handle(e *lx.Entry) error {
}
}
// Timestamped enables or disables timestamp display and optionally sets a custom time format.
// If format is empty, defaults to RFC3339.
// Example:
//
// handler := NewColorizedHandler(os.Stdout)
// handler.Timestamped(true, time.StampMilli)
// // Output: Jan 02 15:04:05.000 [app] INFO: Test
func (h *ColorizedHandler) Timestamped(enable bool, format ...string) {
h.showTime = enable
if len(format) > 0 && format[0] != "" {
h.timeFormat = format[0]
}
}
// handleRegularOutput handles normal log entries.
// It formats the entry with colored namespace, level, message, fields, and stack trace (if present),
// writing the result to the handler's writer.
@@ -146,6 +167,12 @@ func (h *ColorizedHandler) Handle(e *lx.Entry) error {
func (h *ColorizedHandler) handleRegularOutput(e *lx.Entry) error {
var builder strings.Builder // Buffer for building formatted output
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Space)
}
// Format namespace with colors
h.formatNamespace(&builder, e)
@@ -345,6 +372,13 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) {
// h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61 62 'ab'"}) // Writes colored dump
func (h *ColorizedHandler) handleDumpOutput(e *lx.Entry) error {
var builder strings.Builder
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Newline)
}
// Write colored BEGIN separator
builder.WriteString(h.palette.Title)
builder.WriteString("---- BEGIN DUMP ----")

View File

@@ -11,8 +11,10 @@ import (
// Useful for testing or buffering logs for later inspection.
// It maintains a thread-safe slice of log entries, protected by a read-write mutex.
type MemoryHandler struct {
mu sync.RWMutex // Protects concurrent access to entries
entries []*lx.Entry // Slice of stored log entries
mu sync.RWMutex // Protects concurrent access to entries
entries []*lx.Entry // Slice of stored log entries
showTime bool // Whether to show timestamps when dumping
timeFormat string // Time format for dumping
}
// NewMemoryHandler creates a new MemoryHandler.
@@ -28,6 +30,23 @@ func NewMemoryHandler() *MemoryHandler {
}
}
// Timestamped enables/disables timestamp display when dumping and optionally sets a time format.
// Consistent with TextHandler and ColorizedHandler signature.
// Example:
//
// handler.Timestamped(true) // Enable with default format
// handler.Timestamped(true, time.StampMilli) // Enable with custom format
// handler.Timestamped(false) // Disable
func (h *MemoryHandler) Timestamped(enable bool, format ...string) {
h.mu.Lock()
defer h.mu.Unlock()
h.showTime = enable
if len(format) > 0 && format[0] != "" {
h.timeFormat = format[0]
}
}
// Handle stores the log entry in memory.
// It appends the provided entry to the entries slice, ensuring thread-safety with a write lock.
// Always returns nil, as it does not perform I/O operations.
@@ -82,6 +101,7 @@ func (h *MemoryHandler) Dump(w io.Writer) error {
// Create a temporary TextHandler to format entries
tempHandler := NewTextHandler(w)
tempHandler.Timestamped(h.showTime, h.timeFormat)
// Process each entry through the TextHandler
for _, entry := range h.entries {

View File

@@ -6,6 +6,7 @@ import (
"io"
"sort"
"strings"
"time"
)
// TextHandler is a handler that outputs log entries as plain text.
@@ -13,7 +14,9 @@ import (
// writing the result to the provided writer.
// Thread-safe if the underlying writer is thread-safe.
type TextHandler struct {
w io.Writer // Destination for formatted log output
w io.Writer // Destination for formatted log output
showTime bool // Whether to display timestamps
timeFormat string // Format for timestamps (defaults to time.RFC3339)
}
// NewTextHandler creates a new TextHandler writing to the specified writer.
@@ -24,7 +27,24 @@ type TextHandler struct {
// logger := ll.New("app").Enable().Handler(handler)
// logger.Info("Test") // Output: [app] INFO: Test
func NewTextHandler(w io.Writer) *TextHandler {
return &TextHandler{w: w}
return &TextHandler{
w: w,
showTime: false,
timeFormat: time.RFC3339,
}
}
// Timestamped enables or disables timestamp display and optionally sets a custom time format.
// If format is empty, defaults to RFC3339.
// Example:
//
// handler := NewTextHandler(os.Stdout)
// handler.Timestamped(true, time.StampMilli)
// // Output: Jan 02 15:04:05.000 [app] INFO: Test
func (h *TextHandler) Timestamped(enable bool, format ...string) {
h.showTime = enable
if len(format) > 0 && format[0] != "" {
h.timeFormat = format[0]
}
}
// Handle processes a log entry and writes it as plain text.
@@ -60,6 +80,12 @@ func (h *TextHandler) Handle(e *lx.Entry) error {
func (h *TextHandler) handleRegularOutput(e *lx.Entry) error {
var builder strings.Builder // Buffer for building formatted output
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Space)
}
// Format namespace based on style
switch e.Style {
case lx.NestedPath:
@@ -140,6 +166,12 @@ func (h *TextHandler) handleDumpOutput(e *lx.Entry) error {
// For text handler, we just add a newline before dump output
var builder strings.Builder // Buffer for building formatted output
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Newline)
}
// Add separator lines and dump content
builder.WriteString("---- BEGIN DUMP ----\n")
builder.WriteString(e.Message)

View File

@@ -1116,6 +1116,24 @@ func (l *Logger) Style(style lx.StyleType) *Logger {
return l
}
// Timestamped enables or disables timestamp logging for the logger and optionally sets the timestamp format.
// It is thread-safe, using a write lock to ensure safe concurrent access.
// If the logger's handler supports the lx.Timestamper interface, the timestamp settings are applied.
// The method returns the logger instance to support method chaining.
// Parameters:
//
// enable: Boolean to enable or disable timestamp logging
// format: Optional string(s) to specify the timestamp format
func (l *Logger) Timestamped(enable bool, format ...string) *Logger {
l.mu.Lock()
defer l.mu.Unlock()
if h, ok := l.handler.(lx.Timestamper); ok {
h.Timestamped(enable, format...)
}
return l
}
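// Example (sketch; assumes the handlers live in the lh package, as imported
// elsewhere in this change):
//
//	handler := lh.NewTextHandler(os.Stdout)
//	logger := ll.New("app").Enable().Handler(handler).Timestamped(true, time.StampMilli)
//	logger.Info("Test") // Output: Jan 02 15:04:05.000 [app] INFO: Test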
// Use adds a middleware function to process log entries before they are handled, returning
// a Middleware handle for removal. Middleware returning a non-nil error stops the log.
// It is thread-safe using a write lock.
@@ -1389,6 +1407,24 @@ func WithHandler(handler lx.Handler) Option {
}
}
// WithTimestamped returns an Option that configures timestamp settings for the logger's existing handler.
// It enables or disables timestamp logging and optionally sets the timestamp format if the handler
// supports the lx.Timestamper interface. If no handler is set, the function has no effect.
// Parameters:
//
// enable: Boolean to enable or disable timestamp logging
// format: Optional string(s) to specify the timestamp format
func WithTimestamped(enable bool, format ...string) Option {
return func(l *Logger) {
if l.handler != nil { // Check if a handler is set
// Verify if the handler supports the lx.Timestamper interface
if h, ok := l.handler.(lx.Timestamper); ok {
h.Timestamped(enable, format...) // Apply timestamp settings to the handler
}
}
}
}
// WithLevel sets the minimum log level for the logger as a functional option for
// configuring a new logger instance.
// Example:

View File

@@ -124,6 +124,16 @@ type Handler interface {
Handle(e *Entry) error // Processes a log entry, returning any error
}
// Timestamper defines an interface for handlers that support timestamp configuration.
// It includes a method to enable or disable timestamp logging and optionally set the timestamp format.
type Timestamper interface {
// Timestamped enables or disables timestamp logging and allows specifying an optional format.
// Parameters:
// enable: Boolean to enable or disable timestamp logging
// format: Optional string(s) to specify the timestamp format
Timestamped(enable bool, format ...string)
}
// ClassType represents the type of a log entry.
// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw),
// influencing how handlers process and format them.

View File

@@ -28,7 +28,7 @@ go get github.com/olekukonko/tablewriter@v0.0.5
#### Latest Version
The latest stable version
```bash
go get github.com/olekukonko/tablewriter@v1.0.7
go get github.com/olekukonko/tablewriter@v1.0.9
```
**Warning:** Version `v1.0.0` is missing functionality and should not be used.
@@ -62,7 +62,7 @@ func main() {
data := [][]string{
{"Package", "Version", "Status"},
{"tablewriter", "v0.0.5", "legacy"},
{"tablewriter", "v1.0.7", "latest"},
{"tablewriter", "v1.0.9", "latest"},
}
table := tablewriter.NewWriter(os.Stdout)
@@ -77,7 +77,7 @@ func main() {
│ PACKAGE │ VERSION │ STATUS │
├─────────────┼─────────┼────────┤
│ tablewriter │ v0.0.5 │ legacy │
│ tablewriter │ v1.0.7 │ latest │
│ tablewriter │ v1.0.9 │ latest │
└─────────────┴─────────┴────────┘
```

View File

@@ -688,6 +688,12 @@ func (bb *BehaviorConfigBuilder) WithCompactMerge(state tw.State) *BehaviorConfi
return bb
}
// WithAutoHeader enables/disables automatic header extraction for structs in Bulk.
func (bb *BehaviorConfigBuilder) WithAutoHeader(state tw.State) *BehaviorConfigBuilder {
bb.config.Structs.AutoHeader = state
return bb
}
// ColumnConfigBuilder configures column-specific settings
type ColumnConfigBuilder struct {
parent *ConfigBuilder

View File

@@ -717,6 +717,10 @@ func defaultConfig() Config {
Behavior: tw.Behavior{
AutoHide: tw.Off,
TrimSpace: tw.On,
Structs: tw.Struct{
AutoHeader: tw.Off,
Tags: []string{"json", "db"},
},
},
}
}
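// Illustrative sketch of the new struct behavior (the User type is hypothetical;
// wiring the Config into a table is omitted here):
//
//	type User struct {
//		Name string `json:"name"`
//		Age  int    `json:"age"`
//	}
//
//	cfg := defaultConfig()
//	cfg.Behavior.Structs.AutoHeader = tw.On
//	// With AutoHeader enabled and no explicit Header() call, Bulk([]User{...})
//	// derives the header row from the first struct's json/db tags.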
@@ -844,6 +848,14 @@ func mergeConfig(dst, src Config) Config {
dst.Behavior.Compact = src.Behavior.Compact
dst.Behavior.Header = src.Behavior.Header
dst.Behavior.Footer = src.Behavior.Footer
dst.Behavior.Footer = src.Behavior.Footer
dst.Behavior.Structs.AutoHeader = src.Behavior.Structs.AutoHeader
// Only override tags when the source config provides them
if len(src.Behavior.Structs.Tags) > 0 {
dst.Behavior.Structs.Tags = src.Behavior.Structs.Tags
}
if src.Widths.Global != 0 {
dst.Widths.Global = src.Widths.Global

View File

@@ -523,9 +523,22 @@ func (f *Blueprint) renderLine(ctx tw.Formatting) {
isTotalPattern := false
// Case-insensitive check for "total"
if isHMergeStart && colIndex > 0 {
if prevCellCtx, ok := ctx.Row.Current[colIndex-1]; ok {
if strings.Contains(strings.ToLower(prevCellCtx.Data), "total") {
isTotalPattern = true
f.logger.Debugf("renderLine: total pattern in row in %d", colIndex)
}
}
}
// Get the alignment from the configuration
align = cellCtx.Align
// Override alignment for footer merged cells
if (ctx.Row.Position == tw.Footer && isHMergeStart) || isTotalPattern {
if align != tw.AlignRight {
if align == tw.AlignNone {
f.logger.Debugf("renderLine: Applying AlignRight HMerge/TOTAL override for Footer col %d. Original/default align was: %s", colIndex, align)
align = tw.AlignRight
}

View File

@@ -1,7 +1,6 @@
package tablewriter
import (
"fmt"
"github.com/olekukonko/errors"
"github.com/olekukonko/tablewriter/pkg/twwidth"
"github.com/olekukonko/tablewriter/tw"
@@ -90,7 +89,7 @@ func (t *Table) Start() error {
if !t.renderer.Config().Streaming {
// Check if the configured renderer actually supports streaming.
t.logger.Error("Configured renderer does not support streaming.")
return fmt.Errorf("renderer does not support streaming")
return errors.Newf("renderer does not support streaming")
}
//t.renderer.Start(t.writer)
@@ -208,7 +207,7 @@ func (t *Table) streamAppendRow(row interface{}) error {
rawCellsSlice, err := t.convertCellsToStrings(row, t.config.Row)
if err != nil {
t.logger.Errorf("streamAppendRow: Failed to convert row to strings: %v", err)
return fmt.Errorf("failed to convert row to strings: %w", err)
return errors.Newf("failed to convert row to strings").Wrap(err)
}
if len(rawCellsSlice) == 0 {
@@ -221,7 +220,7 @@ func (t *Table) streamAppendRow(row interface{}) error {
}
if err := t.ensureStreamWidthsCalculated(rawCellsSlice, t.config.Row); err != nil {
return fmt.Errorf("failed to establish stream column count/widths: %w", err)
return errors.New("failed to establish stream column count/widths").Wrap(err)
}
// Now, check for column mismatch if a column count has been established.

View File

@@ -2,7 +2,6 @@ package tablewriter
import (
"bytes"
"fmt"
"github.com/olekukonko/errors"
"github.com/olekukonko/ll"
"github.com/olekukonko/ll/lh"
@@ -180,65 +179,87 @@ func (t *Table) Caption(caption tw.Caption) *Table { // This is the one we modif
// This method always contributes to a single logical row in the table.
// To add multiple distinct rows, call Append multiple times (once for each row's data)
// or use the Bulk() method if providing a slice where each element is a row.
func (t *Table) Append(rows ...interface{}) error { // rows is already []interface{}
func (t *Table) Append(rows ...interface{}) error {
t.ensureInitialized()
if t.config.Stream.Enable && t.hasPrinted {
// Streaming logic remains unchanged, as AutoHeader is a batch-mode concept.
t.logger.Debugf("Append() called in streaming mode with %d items for a single row", len(rows))
var rowItemForStream interface{}
if len(rows) == 1 {
rowItemForStream = rows[0]
} else {
rowItemForStream = rows // Pass the slice of items if multiple args
rowItemForStream = rows
}
if err := t.streamAppendRow(rowItemForStream); err != nil {
t.logger.Errorf("Error rendering streaming row: %v", err)
return fmt.Errorf("failed to stream append row: %w", err)
return errors.Newf("failed to stream append row").Wrap(err)
}
return nil
}
//Batch Mode Logic
// Batch Mode Logic
t.logger.Debugf("Append (Batch) received %d arguments: %v", len(rows), rows)
var cellsSource interface{}
if len(rows) == 1 {
cellsSource = rows[0]
t.logger.Debug("Append (Batch): Single argument provided. Treating it as the source for row cells.")
} else {
cellsSource = rows // 'rows' is []interface{} containing all arguments
t.logger.Debug("Append (Batch): Multiple arguments provided. Treating them directly as cells for one row.")
cellsSource = rows
}
// Check if we should attempt to auto-generate headers from this append operation.
// Conditions: AutoHeader is on, no headers are set yet, and this is the first data row.
isFirstRow := len(t.rows) == 0
if t.config.Behavior.Structs.AutoHeader.Enabled() && len(t.headers) == 0 && isFirstRow {
t.logger.Debug("Append: Triggering AutoHeader for the first row.")
headers := t.extractHeadersFromStruct(cellsSource)
if len(headers) > 0 {
// Set the extracted headers. The Header() method handles the rest.
t.Header(headers)
}
}
if err := t.appendSingle(cellsSource); err != nil {
// The rest of the function proceeds as before, converting the data to string lines.
lines, err := t.toStringLines(cellsSource, t.config.Row)
if err != nil {
t.logger.Errorf("Append (Batch) failed for cellsSource %v: %v", cellsSource, err)
return err
}
t.rows = append(t.rows, lines)
t.logger.Debugf("Append (Batch) completed for one row, total rows in table: %d", len(t.rows))
return nil
}
// Bulk adds multiple rows from a slice to the table (legacy method).
// Parameter rows must be a slice compatible with stringer or []string.
// Returns an error if the input is invalid or appending fails.
// Bulk adds multiple rows from a slice to the table.
// If Behavior.Structs.AutoHeader is enabled, no headers are set, and rows is a slice of structs,
// automatically extracts/sets headers from the first struct.
func (t *Table) Bulk(rows interface{}) error {
t.logger.Debug("Starting Bulk operation")
rv := reflect.ValueOf(rows)
if rv.Kind() != reflect.Slice {
err := errors.Newf("Bulk expects a slice, got %T", rows)
t.logger.Debugf("Bulk error: %v", err)
return err
return errors.Newf("Bulk expects a slice, got %T", rows)
}
if rv.Len() == 0 {
return nil
}
// AutoHeader logic remains here, as it's a "Bulk" operation concept.
if t.config.Behavior.Structs.AutoHeader.Enabled() && len(t.headers) == 0 {
first := rv.Index(0).Interface()
// We can now correctly get headers from pointers or embedded structs
headers := t.extractHeadersFromStruct(first)
if len(headers) > 0 {
t.Header(headers)
}
}
// The rest of the logic is now just a loop over Append.
for i := 0; i < rv.Len(); i++ {
row := rv.Index(i).Interface()
t.logger.Debugf("Processing bulk row %d: %v", i, row)
if err := t.appendSingle(row); err != nil {
t.logger.Debugf("Bulk append failed at index %d: %v", i, err)
if err := t.Append(row); err != nil { // Use Append
return err
}
}
t.logger.Debugf("Bulk completed, processed %d rows", rv.Len())
return nil
}
@@ -1383,13 +1404,13 @@ func (t *Table) render() error {
if err != nil {
t.writer = originalWriter
t.logger.Errorf("prepareContexts failed: %v", err)
return fmt.Errorf("failed to prepare table contexts: %w", err)
return errors.Newf("failed to prepare table contexts").Wrap(err)
}
if err := ctx.renderer.Start(t.writer); err != nil {
t.writer = originalWriter
t.logger.Errorf("Renderer Start() error: %v", err)
return fmt.Errorf("renderer start failed: %w", err)
return errors.Newf("renderer start failed").Wrap(err)
}
renderError := false
@@ -1404,7 +1425,7 @@ func (t *Table) render() error {
if renderErr := renderFn(ctx, mctx); renderErr != nil {
t.logger.Errorf("Renderer section error (%s): %v", sectionName, renderErr)
if !renderError {
firstRenderErr = fmt.Errorf("failed to render %s section: %w", sectionName, renderErr)
firstRenderErr = errors.Newf("failed to render %s section", sectionName).Wrap(renderErr)
}
renderError = true
break
@@ -1414,7 +1435,7 @@ func (t *Table) render() error {
if closeErr := ctx.renderer.Close(); closeErr != nil {
t.logger.Errorf("Renderer Close() error: %v", closeErr)
if !renderError {
firstRenderErr = fmt.Errorf("renderer close failed: %w", closeErr)
firstRenderErr = errors.Newf("renderer close failed").Wrap(closeErr)
}
renderError = true
}
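Taken together, the Append and Bulk changes above let a slice of structs drive both the header row and the data rows. A hedged end-to-end sketch (the user struct and its tags are illustrative; NewTable and WithConfig are assumed from the v1 option API):

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
	"github.com/olekukonko/tablewriter/tw"
)

type user struct {
	Name  string `json:"name"`
	Email string `json:"email"`
	Age   int    `json:"age,omitempty"`
}

func main() {
	t := tablewriter.NewTable(os.Stdout, tablewriter.WithConfig(tablewriter.Config{
		Behavior: tw.Behavior{Structs: tw.Struct{AutoHeader: tw.On}},
	}))

	// With AutoHeader on and no headers set, the first struct supplies the
	// header row (tag names, title-cased); each element becomes a data row.
	_ = t.Bulk([]user{
		{Name: "Ada", Email: "ada@example.com", Age: 36},
		{Name: "Linus", Email: "linus@example.com"},
	})
	_ = t.Render()
}
```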

View File

@@ -141,6 +141,18 @@ type Compact struct {
Merge State // Merge enables compact width calculation during cell merging, optimizing space allocation.
}
// Struct holds settings for struct-based operations like AutoHeader.
type Struct struct {
// AutoHeader automatically extracts and sets headers from struct fields when Bulk is called with a slice of structs.
// Uses JSON tags if present, falls back to field names (title-cased). Skips unexported or json:"-" fields.
// Enabled by default for convenience.
AutoHeader State
// Tags is a priority-ordered list of struct tag keys to check for header names.
// The first tag found on a field will be used. Defaults to ["json", "db"].
Tags []string
}
// Behavior defines settings that control table rendering behaviors, such as column visibility and content formatting.
type Behavior struct {
AutoHide State // AutoHide determines whether empty columns are hidden. Ignored in streaming mode.
@@ -152,6 +164,9 @@ type Behavior struct {
// Compact enables optimized width calculation for merged cells, such as in horizontal merges,
// by systematically determining the most efficient width instead of scaling by the number of columns.
Compact Compact
// Structs contains settings for how struct data is processed.
Structs Struct
}
// Padding defines the spacing characters around cell content in all four directions.
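For the Tags priority list, the first matching tag on a field wins, "-" drops the field, and untagged fields fall back to the field name. An illustrative struct (the type and tags are examples, not part of the change):

```go
// With the default Tags order ["json", "db"]:
type Account struct {
	ID      int    `json:"id" db:"account_id"` // json tag wins  -> header from "id"
	Owner   string `db:"owner_name"`           // no json tag    -> header from "owner_name"
	Secret  string `json:"-"`                  // skipped entirely
	Balance float64                            // no tags        -> header from the field name
}
```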

View File

@@ -1197,6 +1197,10 @@ func (t *Table) convertToString(value interface{}) string {
// convertItemToCells is responsible for converting a single input item (which could be
// a struct, a basic type, or an item implementing Stringer/Formatter) into a slice
// of strings, where each string represents a cell for the table row.
// zoo.go
// convertItemToCells is responsible for converting a single input item into a slice of strings.
// It now uses the unified struct parser for structs.
func (t *Table) convertItemToCells(item interface{}) ([]string, error) {
t.logger.Debugf("convertItemToCells: Converting item of type %T", item)
@@ -1204,10 +1208,10 @@ func (t *Table) convertItemToCells(item interface{}) ([]string, error) {
if t.stringer != nil {
res, err := t.convertToStringer(item)
if err == nil {
t.logger.Debugf("convertItemToCells: Used custom table stringer (t.stringer) for type %T. Produced %d cells: %v", item, len(res), res)
t.logger.Debugf("convertItemToCells: Used custom table stringer for type %T. Produced %d cells: %v", item, len(res), res)
return res, nil
}
t.logger.Warnf("convertItemToCells: Custom table stringer (t.stringer) was set but incompatible or errored for type %T: %v. Will attempt other conversion methods.", item, err)
t.logger.Warnf("convertItemToCells: Custom table stringer was set but incompatible for type %T: %v. Will attempt other methods.", item, err)
}
// 2. Handle untyped nil directly.
@@ -1216,85 +1220,26 @@ func (t *Table) convertItemToCells(item interface{}) ([]string, error) {
return []string{""}, nil
}
itemValue := reflect.ValueOf(item)
itemType := itemValue.Type()
// 3. Handle pointers: Dereference pointers to get to the underlying struct or value.
if itemType.Kind() == reflect.Ptr {
if itemValue.IsNil() {
t.logger.Debugf("convertItemToCells: Item is a nil pointer of type %s. Returning single empty cell.", itemType.String())
return []string{""}, nil
}
itemValue = itemValue.Elem()
itemType = itemValue.Type()
t.logger.Debugf("convertItemToCells: Dereferenced pointer, now processing type %s.", itemType.String())
// 3. Use the new unified struct parser. It handles pointers and embedding.
// We only care about the values it returns.
_, values := t.extractFieldsAndValuesFromStruct(item)
if values != nil {
t.logger.Debugf("convertItemToCells: Structs %T reflected into %d cells: %v", item, len(values), values)
return values, nil
}
// 4. Special handling for structs:
if itemType.Kind() == reflect.Struct {
// Check if the original item (before potential dereference) implements Formatter or Stringer.
if formatter, ok := item.(tw.Formatter); ok {
t.logger.Debugf("convertItemToCells: Struct item (type %s) is tw.Formatter. Using Format(). Resulting in 1 cell.", itemType.Name())
return []string{formatter.Format()}, nil
}
if stringer, ok := item.(fmt.Stringer); ok {
t.logger.Debugf("convertItemToCells: Struct item (type %s) is fmt.Stringer. Using String(). Resulting in 1 cell.", itemType.Name())
return []string{stringer.String()}, nil
}
t.logger.Debugf("convertItemToCells: Item is a struct (type %s). Attempting generic field reflection to expand into multiple cells.", itemType.Name())
numFields := itemValue.NumField()
structCells := make([]string, 0, numFields)
hasProcessableFields := false
for i := 0; i < numFields; i++ {
fieldMeta := itemType.Field(i)
if fieldMeta.PkgPath != "" {
t.logger.Debugf("convertItemToCells: Skipping unexported field %s in struct %s", fieldMeta.Name, itemType.Name())
continue
}
hasProcessableFields = true // Mark true if we encounter any exported field
jsonTag := fieldMeta.Tag.Get("json")
if jsonTag == "-" {
t.logger.Debugf("convertItemToCells: Skipping field %s in struct %s due to json:\"-\" tag", fieldMeta.Name, itemType.Name())
continue
}
fieldReflectedValue := itemValue.Field(i)
if strings.Contains(jsonTag, ",omitempty") && fieldReflectedValue.IsZero() {
t.logger.Debugf("convertItemToCells: Omitting zero value for field %s in struct %s due to omitempty tag", fieldMeta.Name, itemType.Name())
structCells = append(structCells, "")
continue
}
structCells = append(structCells, t.convertToString(fieldReflectedValue.Interface()))
}
// Only return expanded cells if there were processable fields.
// If a struct has no exported fields, or all were skipped via json:"-",
// it should still produce output (e.g. fmt.Sprintf of the struct) rather than an empty row.
if hasProcessableFields {
t.logger.Debugf("convertItemToCells: Struct %s reflected into %d cells: %v", itemType.Name(), len(structCells), structCells)
return structCells, nil
}
t.logger.Warnf("convertItemToCells: Struct %s has no processable exported fields. Falling back to Sprintf for the whole item (resulting in 1 cell).", itemType.Name())
return []string{t.convertToString(item)}, nil // 'item' is the original potentially pointer type
}
// 5. Item is NOT a struct. It might be a basic type or a non-struct type implementing Formatter/Stringer.
// These should all result in a single cell.
// 4. Fallback for any other single item (e.g., basic types, or types that implement Stringer/Formatter).
// This code path is now for non-struct types.
if formatter, ok := item.(tw.Formatter); ok {
t.logger.Debugf("convertItemToCells: Item (non-struct, type %T) is tw.Formatter. Using Format(). Resulting in 1 cell.", item)
t.logger.Debugf("convertItemToCells: Item (non-struct, type %T) is tw.Formatter. Using Format().", item)
return []string{formatter.Format()}, nil
}
if stringer, ok := item.(fmt.Stringer); ok {
t.logger.Debugf("convertItemToCells: Item (non-struct, type %T) is fmt.Stringer. Using String(). Resulting in 1 cell.", item)
t.logger.Debugf("convertItemToCells: Item (non-struct, type %T) is fmt.Stringer. Using String().", item)
return []string{stringer.String()}, nil
}
// 6. Fallback for any other single item (e.g., basic types like int, string, bool):
t.logger.Debugf("convertItemToCells: Item (type %T) is a basic type or unhandled by other mechanisms. Treating as single cell via convertToString.", item)
t.logger.Debugf("convertItemToCells: Item (type %T) is a basic type. Treating as single cell via convertToString.", item)
return []string{t.convertToString(item)}, nil
}
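After the struct path, non-struct values that implement tw.Formatter or fmt.Stringer still collapse to a single cell. A small sketch (temperature is an illustrative type; the Format() string signature is inferred from the call above):

```go
type temperature float64

// Format satisfies tw.Formatter, so appending a temperature value produces the
// single cell "21.5 °C" rather than a field-by-field expansion.
func (t temperature) Format() string { return fmt.Sprintf("%.1f °C", float64(t)) }
```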
@@ -1694,3 +1639,92 @@ func (t *Table) updateWidths(row []string, widths tw.Mapper[int, int], padding t
}
}
}
// extractHeadersFromStruct is now a thin wrapper around the new unified function.
// It only cares about the header names.
func (t *Table) extractHeadersFromStruct(sample interface{}) []string {
headers, _ := t.extractFieldsAndValuesFromStruct(sample)
return headers
}
// extractFieldsAndValuesFromStruct is the new single source of truth for struct reflection.
// It recursively processes a struct, handling pointers and embedded structs,
// and returns two slices: one for header names and one for string-converted values.
func (t *Table) extractFieldsAndValuesFromStruct(sample interface{}) ([]string, []string) {
v := reflect.ValueOf(sample)
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil, nil
}
v = v.Elem()
}
if v.Kind() != reflect.Struct {
return nil, nil
}
typ := v.Type()
headers := make([]string, 0, typ.NumField())
values := make([]string, 0, typ.NumField())
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
fieldValue := v.Field(i)
// Skip unexported fields
if field.PkgPath != "" {
continue
}
// Handle embedded structs recursively
if field.Anonymous {
h, val := t.extractFieldsAndValuesFromStruct(fieldValue.Interface())
if h != nil {
headers = append(headers, h...)
values = append(values, val...)
}
continue
}
var tagName string
skipField := false
// Loop through the priority list of configured tags (e.g., ["json", "db"])
for _, tagKey := range t.config.Behavior.Structs.Tags {
tagValue := field.Tag.Get(tagKey)
// If a tag is found...
if tagValue != "" {
// If the tag is "-", this field should be skipped entirely.
if tagValue == "-" {
skipField = true
break // Stop processing tags for this field.
}
// Otherwise, we've found our highest-priority tag. Store it and stop.
tagName = tagValue
break // Stop processing tags for this field.
}
}
// If the field was marked for skipping, continue to the next field.
if skipField {
continue
}
// Determine header name from the tag or fallback to the field name
headerName := field.Name
if tagName != "" {
headerName = strings.Split(tagName, ",")[0]
}
headers = append(headers, tw.Title(headerName))
// Determine value, respecting omitempty from the found tag
value := ""
if !strings.Contains(tagName, ",omitempty") || !fieldValue.IsZero() {
value = t.convertToString(fieldValue.Interface())
}
values = append(values, value)
}
return headers, values
}
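A worked sketch of what the extractor returns for an embedded struct and an ",omitempty" tag (the types are illustrative; exact header casing comes from tw.Title and is not asserted here):

```go
type Audit struct {
	CreatedBy string `json:"created_by"`
}

type Ticket struct {
	Audit        // exported embedded struct: its fields are flattened in place
	Title string `json:"title"`
	Notes string `json:"notes,omitempty"` // zero value -> header kept, value left empty
}

// extractFieldsAndValuesFromStruct(Ticket{Audit{"ops"}, "Broken build", ""})
// yields one header per surviving field and the values ["ops", "Broken build", ""].
```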

6
vendor/modules.txt vendored
View File

@@ -985,15 +985,15 @@ github.com/nxadm/tail/winfile
# github.com/oklog/run v1.2.0
## explicit; go 1.20
github.com/oklog/run
# github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6
# github.com/olekukonko/errors v1.1.0
## explicit; go 1.21
github.com/olekukonko/errors
# github.com/olekukonko/ll v0.0.8
# github.com/olekukonko/ll v0.0.9
## explicit; go 1.21
github.com/olekukonko/ll
github.com/olekukonko/ll/lh
github.com/olekukonko/ll/lx
# github.com/olekukonko/tablewriter v1.0.8
# github.com/olekukonko/tablewriter v1.0.9
## explicit; go 1.21
github.com/olekukonko/tablewriter
github.com/olekukonko/tablewriter/pkg/twwarp